diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..8faa6c02b --- /dev/null +++ b/.gitignore @@ -0,0 +1,79 @@ +# +# NOTE! Don't add files that are generated in specific +# subdirectories here. Add them in the ".gitignore" file +# in that subdirectory instead. +# +# NOTE! Please use 'git ls-files -i --exclude-standard' +# command after changing this file, to see if there are +# any tracked files which get ignored after the change. +# +# Normal rules +# +.* +*.o +*.o.* +*.a +*.s +*.ko +*.so +*.so.dbg +*.mod.c +*.i +*.lst +*.symtypes +*.order +modules.builtin +*.elf +*.bin +*.gz +*.bz2 +*.lzma +*.lzo +*.patch +*.gcno + +# +# Top-level generic files +# +/tags +/TAGS +/linux +/vmlinux +/vmlinuz +/System.map +/Module.markers +/Module.symvers + +# +# git files that we don't want to ignore even it they are dot-files +# +!.gitignore +!.mailmap + +# +# Generated include files +# +include/config +include/linux/version.h +include/generated + +# stgit generated dirs +patches-* + +# quilt's files +patches +series + +# cscope files +cscope.* +ncscope.* + +# gnu global files +GPATH +GRTAGS +GSYMS +GTAGS + +*.orig +*~ +\#*# diff --git a/Makefile b/Makefile index 3ab214703..bcede986c 100755 --- a/Makefile +++ b/Makefile @@ -192,8 +192,8 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ # Default value for CROSS_COMPILE is not to prefix executables # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile export KBUILD_BUILDHOST := $(SUBARCH) -ARCH ?= $(SUBARCH) -CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%) +ARCH ?= arm +CROSS_COMPILE ?= /home/forumber/prebuilt/linux-x86/toolchain/arm-eabi-4.4.3/bin/arm-eabi- # Architecture as present in compile.h UTS_MACHINE := $(ARCH) diff --git a/arch/arm/configs/u8800_defconfig b/arch/arm/configs/u8800_defconfig index 1e9a7a2c9..e252fda97 100644 --- a/arch/arm/configs/u8800_defconfig +++ b/arch/arm/configs/u8800_defconfig @@ -45,7 +45,7 @@ CONFIG_HAVE_KERNEL_LZO=y CONFIG_KERNEL_LZMA=y # CONFIG_KERNEL_LZO is not set CONFIG_DEFAULT_HOSTNAME="(none)" -# CONFIG_SWAP is not set +CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set @@ -382,7 +382,7 @@ CONFIG_MSM_DALRPC=y CONFIG_MSM_DALRPC_TEST=m CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y CONFIG_MSM_CPU_FREQ_MAX=1024000 -CONFIG_MSM_CPU_FREQ_MIN=122880 +CONFIG_MSM_CPU_FREQ_MIN=368640 # CONFIG_MSM_AVS_HW is not set # CONFIG_MSM_HW3D is not set CONFIG_AMSS_7X25_VERSION_2009=y @@ -584,18 +584,21 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_TABLE=y CONFIG_CPU_FREQ_STAT=y # CONFIG_CPU_FREQ_STAT_DETAILS is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2=y CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_VDD_LEVELS=y +CONFIG_CPU_FREQ_GOV_SMARTASS2=y # CONFIG_CPU_IDLE is not set CONFIG_CPU_FREQ_MSM=y diff --git a/arch/arm/mach-msm/acpuclock-7x30.c b/arch/arm/mach-msm/acpuclock-7x30.c index 8ffe61a4d..8f93fcfc4 100644 --- a/arch/arm/mach-msm/acpuclock-7x30.c +++ b/arch/arm/mach-msm/acpuclock-7x30.c @@ -52,6 +52,8 @@ #define VDD_RAW(mv) (((MV(mv) 
/ V_STEP) - 30) | VREG_DATA) #define MAX_AXI_KHZ 192000 +#define SEMC_ACPU_MIN_UV_MV 750U +#define SEMC_ACPU_MAX_UV_MV 1500U struct clock_state { struct clkctl_acpu_speed *current_speed; @@ -121,20 +123,20 @@ static struct clk *acpuclk_sources[MAX_SOURCE]; static struct clkctl_acpu_speed acpu_freq_tbl[] = { { 0, 24576, LPXO, 0, 0, 30720000, 900, VDD_RAW(900) }, { 0, 61440, PLL_3, 5, 11, 61440000, 900, VDD_RAW(900) }, - { 1, 122880, PLL_3, 5, 5, 61440000, 900, VDD_RAW(900) }, + { 0, 122880, PLL_3, 5, 5, 61440000, 900, VDD_RAW(900) }, { 0, 184320, PLL_3, 5, 4, 61440000, 900, VDD_RAW(900) }, { 0, MAX_AXI_KHZ, AXI, 1, 0, 61440000, 900, VDD_RAW(900) }, - { 1, 245760, PLL_3, 5, 2, 61440000, 900, VDD_RAW(900) }, + { 0, 245760, PLL_3, 5, 2, 61440000, 900, VDD_RAW(900) }, { 1, 368640, PLL_3, 5, 1, 122800000, 900, VDD_RAW(900) }, - { 0, 480000, PLL_2, 3, 0, 122800000, 900, VDD_RAW(900), &pll2_tbl[0]}, - { 0, 600000, PLL_2, 3, 0, 122800000, 925, VDD_RAW(925), &pll2_tbl[1]}, + { 1, 480000, PLL_2, 3, 0, 122800000, 900, VDD_RAW(900), &pll2_tbl[0]}, + { 1, 600000, PLL_2, 3, 0, 122800000, 925, VDD_RAW(925), &pll2_tbl[1]}, /* AXI has MSMC1 implications. See above. */ - { 1, 768000, PLL_1, 2, 0, 153600000, 1050, VDD_RAW(1050) }, + { 1, 768000, PLL_1, 2, 0, 153600000, 975, VDD_RAW(975) }, /* * AXI has MSMC1 implications. See above. */ - { 1, 806400, PLL_2, 3, 0, UINT_MAX, 1100, VDD_RAW(1100), &pll2_tbl[2]}, - { 1, 1024000, PLL_2, 3, 0, UINT_MAX, 1200, VDD_RAW(1200), &pll2_tbl[3]}, + { 1, 806400, PLL_2, 3, 0, UINT_MAX, 1000, VDD_RAW(1000), &pll2_tbl[2]}, + { 1, 1024000, PLL_2, 3, 0, UINT_MAX, 1100, VDD_RAW(1100), &pll2_tbl[3]}, { 1, 1200000, PLL_2, 3, 0, UINT_MAX, 1200, VDD_RAW(1200), &pll2_tbl[4]}, { 1, 1401600, PLL_2, 3, 0, UINT_MAX, 1250, VDD_RAW(1250), &pll2_tbl[5]}, { 1, 1516800, PLL_2, 3, 0, UINT_MAX, 1300, VDD_RAW(1300), &pll2_tbl[6]}, @@ -493,3 +495,42 @@ static int __init acpuclk_7x30_init(struct acpuclk_soc_data *soc_data) struct acpuclk_soc_data acpuclk_7x30_soc_data __initdata = { .init = acpuclk_7x30_init, }; + +#ifdef CONFIG_CPU_FREQ_VDD_LEVELS + +ssize_t acpuclk_get_vdd_levels_str(char *buf) +{ + int i, len = 0; + if (buf) + { + mutex_lock(&drv_state.lock); + for (i = 0; acpu_freq_tbl[i].acpu_clk_khz; i++) + { + len += sprintf(buf + len, "%8u: %4d\n", acpu_freq_tbl[i].acpu_clk_khz, acpu_freq_tbl[i].vdd_mv); + } + mutex_unlock(&drv_state.lock); + } + return len; +} + +void acpuclk_set_vdd(unsigned int khz, int vdd) +{ + int i; + unsigned int new_vdd; + vdd = vdd / V_STEP * V_STEP; + mutex_lock(&drv_state.lock); + for (i = 0; acpu_freq_tbl[i].acpu_clk_khz; i++) + { + if (khz == 0) + new_vdd = min(max((acpu_freq_tbl[i].vdd_mv + vdd), SEMC_ACPU_MIN_UV_MV), SEMC_ACPU_MAX_UV_MV); + else if (acpu_freq_tbl[i].acpu_clk_khz == khz) + new_vdd = min(max((unsigned int)vdd, SEMC_ACPU_MIN_UV_MV), SEMC_ACPU_MAX_UV_MV); + else continue; + + acpu_freq_tbl[i].vdd_mv = new_vdd; + acpu_freq_tbl[i].vdd_raw = VDD_RAW(new_vdd); + } + mutex_unlock(&drv_state.lock); +} + +#endif diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 194708850..697ce42b6 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -109,6 +109,13 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE loading your cpufreq low-level hardware driver, using the 'interactive' governor for latency-sensitive workloads. +config CPU_FREQ_DEFAULT_GOV_SMARTASS2 + bool "smartass2" + select CPU_FREQ_GOV_SMARTASS2 + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'smartassV2' as default. 
+ endchoice config CPU_FREQ_GOV_PERFORMANCE @@ -206,6 +213,22 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. +config CPU_FREQ_VDD_LEVELS + bool "CPU Vdd levels sysfs interface" + depends on CPU_FREQ_STAT + depends on ARCH_MSM7X30 + default n + help + CPU Vdd levels sysfs interface + +config CPU_FREQ_GOV_SMARTASS2 + tristate "'smartassV2' cpufreq governor" + depends on CPU_FREQ + help + 'smartassV2' - a "smart" governor + + If in doubt, say N. + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index c044060a4..e9261b0cc 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index ff15497e9..7d92421c1 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -557,6 +557,62 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) return policy->governor->show_setspeed(policy, buf); } +#ifdef CONFIG_CPU_FREQ_VDD_LEVELS + +extern ssize_t acpuclk_get_vdd_levels_str(char *buf); +static ssize_t show_vdd_levels(struct cpufreq_policy *policy, char *buf) +{ + return acpuclk_get_vdd_levels_str(buf); +} + +extern void acpuclk_set_vdd(unsigned acpu_khz, int vdd); +static ssize_t store_vdd_levels(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + int i = 0, j; + int pair[2] = { 0, 0 }; + int sign = 0; + + if (count < 1) + return 0; + + if (buf[0] == '-') { + sign = -1; + i++; + } else if (buf[0] == '+') { + sign = 1; + i++; + } + + for (j = 0; i < count; i++) { + char c = buf[i]; + if ((c >= '0') && (c <= '9')) { + pair[j] *= 10; + pair[j] += (c - '0'); + } else if ((c == ' ') || (c == '\t')) { + if (pair[j] != 0) { + j++; + if ((sign != 0) || (j > 1)) + break; + } + } + else + break; + } + + if (sign != 0) { + if (pair[0] > 0) + acpuclk_set_vdd(0, sign * pair[0]); + } else { + if ((pair[0] > 0) && (pair[1] > 0)) + acpuclk_set_vdd((unsigned)pair[0], pair[1]); + else + return -EINVAL; + } + return count; +} + +#endif + /** * show_scaling_driver - show the current cpufreq HW/BIOS limitation */ @@ -586,6 +642,9 @@ cpufreq_freq_attr_rw(scaling_min_freq); cpufreq_freq_attr_rw(scaling_max_freq); cpufreq_freq_attr_rw(scaling_governor); cpufreq_freq_attr_rw(scaling_setspeed); +#ifdef CONFIG_CPU_FREQ_VDD_LEVELS +cpufreq_freq_attr_rw(vdd_levels); +#endif static struct attribute *default_attrs[] = { &cpuinfo_min_freq.attr, @@ -599,6 +658,9 @@ static struct attribute *default_attrs[] = { &scaling_driver.attr, &scaling_available_governors.attr, &scaling_setspeed.attr, + #ifdef CONFIG_CPU_FREQ_VDD_LEVELS + &vdd_levels.attr, + #endif NULL }; diff --git a/drivers/cpufreq/cpufreq_smartass2.c b/drivers/cpufreq/cpufreq_smartass2.c new file mode 100644 index 000000000..e00524992 --- /dev/null +++ b/drivers/cpufreq/cpufreq_smartass2.c @@ -0,0 +1,868 @@ +/* + * drivers/cpufreq/cpufreq_smartass2.c + * + * Copyright (C) 2010 Google, Inc. 
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Erasmux
+ *
+ * Based on the interactive governor by Mike Chan (mike@android.com)
+ * which was adapted to the 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
+ *
+ * SMP support based on mod by faux123
+ *
+ * For a general overview of smartassV2 see the relevant part in
+ * Documentation/cpu-freq/governors.txt
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/moduleparam.h>
+#include <asm/cputime.h>
+#include <linux/earlysuspend.h>
+
+
+/******************** Tunable parameters: ********************/
+
+/*
+ * The "ideal" frequency to use when awake. The governor will ramp up faster
+ * towards the ideal frequency and slower after it has passed it. Similarly,
+ * lowering the frequency towards the ideal frequency is faster than below it.
+ */
+#define DEFAULT_AWAKE_IDEAL_FREQ 768000
+static unsigned int awake_ideal_freq;
+
+/*
+ * The "ideal" frequency to use when suspended.
+ * When set to 0, the governor will not track the suspended state (meaning
+ * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
+ * even when suspended).
+ */
+#define DEFAULT_SLEEP_IDEAL_FREQ 245000
+static unsigned int sleep_ideal_freq;
+
+/*
+ * Frequency delta when ramping up above the ideal frequency.
+ * Zero disables this and causes the governor to always jump straight to the
+ * max frequency. When below the ideal frequency we always ramp up to the
+ * ideal freq.
+ */
+#define DEFAULT_RAMP_UP_STEP 256000
+static unsigned int ramp_up_step;
+
+/*
+ * Frequency delta when ramping down below the ideal frequency.
+ * Zero disables this and the ramp-down target is calculated from the load
+ * heuristic instead. When above the ideal frequency we always ramp down to
+ * the ideal freq.
+ */
+#define DEFAULT_RAMP_DOWN_STEP 256000
+static unsigned int ramp_down_step;
+
+/*
+ * CPU freq will be increased if measured load > max_cpu_load.
+ */
+#define DEFAULT_MAX_CPU_LOAD 50
+static unsigned long max_cpu_load;
+
+/*
+ * CPU freq will be decreased if measured load < min_cpu_load.
+ */
+#define DEFAULT_MIN_CPU_LOAD 25
+static unsigned long min_cpu_load;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp up.
+ * Notice we ignore this when we are below the ideal frequency.
+ */
+#define DEFAULT_UP_RATE_US 48000
+static unsigned long up_rate_us;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ * Notice we ignore this when we are above the ideal frequency.
+ */
+#define DEFAULT_DOWN_RATE_US 99000
+static unsigned long down_rate_us;
+
+/*
+ * The frequency to set when waking up from sleep.
+ * When sleep_ideal_freq=0 this will have no effect.
+ */
+#define DEFAULT_SLEEP_WAKEUP_FREQ 1024000
+static unsigned int sleep_wakeup_freq;
+
+/*
+ * Sampling rate; I highly recommend leaving it at 2.
+ */ +#define DEFAULT_SAMPLE_RATE_JIFFIES 2 +static unsigned int sample_rate_jiffies; + + +/*************** End of tunables ***************/ + + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct smartass_info_s { + struct cpufreq_policy *cur_policy; + struct cpufreq_frequency_table *freq_table; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + u64 freq_change_time; + u64 freq_change_time_in_idle; + int cur_cpu_load; + int old_freq; + int ramp_dir; + unsigned int enable; + int ideal_speed; +}; +static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static cpumask_t work_cpumask; +static spinlock_t cpumask_lock; + +static unsigned int suspended; + +#define dprintk(flag,msg...) do { \ + if (debug_mask & flag) printk(KERN_DEBUG msg); \ + } while (0) + +enum { + SMARTASS_DEBUG_JUMPS=1, + SMARTASS_DEBUG_LOAD=2, + SMARTASS_DEBUG_ALG=4 +}; + +/* + * Combination of the above debug flags. + */ +static unsigned long debug_mask; + +static int cpufreq_governor_smartass(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 +static +#endif +struct cpufreq_governor cpufreq_gov_smartass2 = { + .name = "smartassV2", + .governor = cpufreq_governor_smartass, + .max_transition_latency = 6000000, + .owner = THIS_MODULE, +}; + +inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { + if (suspend) { + this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max + policy->max > sleep_ideal_freq ? + (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max; + } else { + this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max + policy->min < awake_ideal_freq ? + (awake_ideal_freq < policy->max ? 
awake_ideal_freq : policy->max) : policy->min; + } +} + +inline static void smartass_update_min_max_allcpus(void) { + unsigned int i; + for_each_online_cpu(i) { + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i); + if (this_smartass->enable) + smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended); + } +} + +inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { + if (freq > (int)policy->max) + return policy->max; + if (freq < (int)policy->min) + return policy->min; + return freq; +} + +inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { + this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); +} + +inline static void work_cpumask_set(unsigned long cpu) { + unsigned long flags; + spin_lock_irqsave(&cpumask_lock, flags); + cpumask_set_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); +} + +inline static int work_cpumask_test_and_clear(unsigned long cpu) { + unsigned long flags; + int res = 0; + spin_lock_irqsave(&cpumask_lock, flags); + res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); + return res; +} + +inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass, + int new_freq, int old_freq, int prefered_relation) { + int index, target; + struct cpufreq_frequency_table *table = this_smartass->freq_table; + + if (new_freq == old_freq) + return 0; + new_freq = validate_freq(policy,new_freq); + if (new_freq == old_freq) + return 0; + + if (table && + !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) + { + target = table[index].frequency; + if (target == old_freq) { + // if for example we are ramping up to *at most* current + ramp_up_step + // but there is no such frequency higher than the current, try also + // to ramp up to *at least* current + ramp_up_step. + if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_L,&index)) + target = table[index].frequency; + // simlarly for ramping down: + else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_H,&index)) + target = table[index].frequency; + } + + if (target == old_freq) { + // We should not get here: + // If we got here we tried to change to a validated new_freq which is different + // from old_freq, so there is no reason for us to remain at same frequency. 
+ printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", + old_freq,new_freq,target); + return 0; + } + } + else target = new_freq; + + __cpufreq_driver_target(policy, target, prefered_relation); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", + old_freq,new_freq,target,policy->cur); + + return target; +} + +static void cpufreq_smartass_timer(unsigned long cpu) +{ + u64 delta_idle; + u64 delta_time; + int cpu_load; + int old_freq; + u64 update_time; + u64 now_idle; + int queued_work = 0; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + now_idle = get_cpu_idle_time_us(cpu, &update_time); + old_freq = policy->cur; + + if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time) + return; + + delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); + delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time); + + // If timer ran less than 1ms after short-term sample started, retry. + if (delta_time < 1000) { + if (!timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + return; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; + + dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n", + old_freq,cpu_load,delta_time); + + this_smartass->cur_cpu_load = cpu_load; + this_smartass->old_freq = old_freq; + + // Scale up if load is above max or if there where no idle cycles since coming out of idle, + // additionally, if we are at or above the ideal_speed, verify we have been at this frequency + // for at least up_rate_us: + if (cpu_load > max_cpu_load || delta_idle == 0) + { + if (old_freq < policy->max && + (old_freq < this_smartass->ideal_speed || delta_idle == 0 || + cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = 1; + work_cpumask_set(cpu); + queue_work(up_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + } + // Similarly for scale down: load should be below min and if we are at or below ideal + // frequency we require that we have been at this frequency for at least down_rate_us: + else if (cpu_load < min_cpu_load && old_freq > policy->min && + (old_freq > this_smartass->ideal_speed || + cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = -1; + work_cpumask_set(cpu); + queue_work(down_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + + // To avoid unnecessary load when the CPU is already at high load, we don't + // reset ourselves if we are at max speed. If and when there are idle cycles, + // the idle loop will activate the timer. + // Additionally, if we queued some work, the work task will reset the timer + // after it has done its adjustments. 
+	if (!queued_work && old_freq < policy->max)
+		reset_timer(cpu,this_smartass);
+}
+
+static void cpufreq_idle(void)
+{
+	struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
+	struct cpufreq_policy *policy = this_smartass->cur_policy;
+
+	if (!this_smartass->enable) {
+		pm_idle_old();
+		return;
+	}
+
+	if (policy->cur == policy->min && timer_pending(&this_smartass->timer))
+		del_timer(&this_smartass->timer);
+
+	pm_idle_old();
+
+	if (!timer_pending(&this_smartass->timer))
+		reset_timer(smp_processor_id(), this_smartass);
+}
+
+/* We use the same work function to scale up and down */
+static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
+{
+	unsigned int cpu;
+	int new_freq;
+	int old_freq;
+	int ramp_dir;
+	struct smartass_info_s *this_smartass;
+	struct cpufreq_policy *policy;
+	unsigned int relation = CPUFREQ_RELATION_L;
+	for_each_possible_cpu(cpu) {
+		this_smartass = &per_cpu(smartass_info, cpu);
+		if (!work_cpumask_test_and_clear(cpu))
+			continue;
+
+		ramp_dir = this_smartass->ramp_dir;
+		this_smartass->ramp_dir = 0;
+
+		old_freq = this_smartass->old_freq;
+		policy = this_smartass->cur_policy;
+
+		if (old_freq != policy->cur) {
+			// frequency was changed by someone else?
+			printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n",
+				old_freq,policy->cur);
+			new_freq = old_freq;
+		}
+		else if (ramp_dir > 0 && nr_running() > 1) {
+			// ramp up logic:
+			if (old_freq < this_smartass->ideal_speed)
+				new_freq = this_smartass->ideal_speed;
+			else if (ramp_up_step) {
+				new_freq = old_freq + ramp_up_step;
+				relation = CPUFREQ_RELATION_H;
+			}
+			else {
+				new_freq = policy->max;
+				relation = CPUFREQ_RELATION_H;
+			}
+			dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n",
+				old_freq,ramp_dir,this_smartass->ideal_speed);
+		}
+		else if (ramp_dir < 0) {
+			// ramp down logic:
+			if (old_freq > this_smartass->ideal_speed) {
+				new_freq = this_smartass->ideal_speed;
+				relation = CPUFREQ_RELATION_H;
+			}
+			else if (ramp_down_step)
+				new_freq = old_freq - ramp_down_step;
+			else {
+				// Load heuristics: Adjust new_freq such that, assuming a linear
+				// scaling of load vs. frequency, the load in the new frequency
+				// will be max_cpu_load:
+				new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load;
+				if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?!
+					new_freq = old_freq - 1;
+			}
+			dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n",
+				old_freq,ramp_dir,this_smartass->ideal_speed);
+		}
+		else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down
+			// before the work task gets to run?
+ // This may also happen if we refused to ramp up because the nr_running()==1 + new_freq = old_freq; + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", + old_freq,ramp_dir,nr_running()); + } + + // do actual ramp up (returns 0, if frequency change failed): + new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation); + if (new_freq) + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + // reset timer: + if (new_freq < policy->max) + reset_timer(cpu,this_smartass); + // if we are maxed out, it is pointless to use the timer + // (idle cycles wake up the timer when the timer comes) + else if (timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + } +} + +static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mask); +} + +static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0) + debug_mask = input; + return res; +} + +static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", up_rate_us); +} + +static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + up_rate_us = input; + return res; +} + +static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", down_rate_us); +} + +static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + down_rate_us = input; + return res; +} + +static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_ideal_freq); +} + +static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + sleep_ideal_freq = input; + if (suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_wakeup_freq); +} + +static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_wakeup_freq = input; + return res; +} + +static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", awake_ideal_freq); +} + +static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + awake_ideal_freq = input; + if (!suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return 
sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return res; +} + +static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_up_step); +} + +static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_up_step = input; + return res; +} + +static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_down_step); +} + +static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_down_step = input; + return res; +} + +static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return res; +} + +static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return res; +} + +#define define_global_rw_attr(_name) \ +static struct global_attr _name##_attr = \ + __ATTR(_name, 0644, show_##_name, store_##_name) + +define_global_rw_attr(debug_mask); +define_global_rw_attr(up_rate_us); +define_global_rw_attr(down_rate_us); +define_global_rw_attr(sleep_ideal_freq); +define_global_rw_attr(sleep_wakeup_freq); +define_global_rw_attr(awake_ideal_freq); +define_global_rw_attr(sample_rate_jiffies); +define_global_rw_attr(ramp_up_step); +define_global_rw_attr(ramp_down_step); +define_global_rw_attr(max_cpu_load); +define_global_rw_attr(min_cpu_load); + +static struct attribute * smartass_attributes[] = { + &debug_mask_attr.attr, + &up_rate_us_attr.attr, + &down_rate_us_attr.attr, + &sleep_ideal_freq_attr.attr, + &sleep_wakeup_freq_attr.attr, + &awake_ideal_freq_attr.attr, + &sample_rate_jiffies_attr.attr, + &ramp_up_step_attr.attr, + &ramp_down_step_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + NULL, +}; + +static struct attribute_group smartass_attr_group = { + .attrs = smartass_attributes, + .name = "smartass", +}; + +static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + this_smartass->cur_policy = new_policy; + + this_smartass->enable = 1; + + 
smartass_update_min_max(this_smartass,new_policy,suspended); + + this_smartass->freq_table = cpufreq_frequency_get_table(cpu); + if (!this_smartass->freq_table) + printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); + + smp_wmb(); + + // Do not register the idle hook and create sysfs + // entries if we have already done so. + if (atomic_inc_return(&active_count) <= 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &smartass_attr_group); + if (rc) + return rc; + + pm_idle_old = pm_idle; + pm_idle = cpufreq_idle; + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_LIMITS: + smartass_update_min_max(this_smartass,new_policy,suspended); + + if (this_smartass->cur_policy->cur > new_policy->max) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->max, CPUFREQ_RELATION_H); + } + else if (this_smartass->cur_policy->cur < new_policy->min) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->min, CPUFREQ_RELATION_L); + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_STOP: + this_smartass->enable = 0; + smp_wmb(); + del_timer(&this_smartass->timer); + flush_work(&freq_scale_work); + this_smartass->idle_exit_time = 0; + + if (atomic_dec_return(&active_count) <= 1) { + sysfs_remove_group(cpufreq_global_kobject, + &smartass_attr_group); + pm_idle = pm_idle_old; + } + break; + } + + return 0; +} + +static void smartass_suspend(int cpu, int suspend) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable) + return; + + smartass_update_min_max(this_smartass,policy,suspend); + if (!suspend) { // resume at max speed: + new_freq = validate_freq(policy,sleep_wakeup_freq); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); + + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_L); + } else { + // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep + // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). + // Eventually, the timer will adjust the frequency if necessary. 
+ + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); + } + + reset_timer(smp_processor_id(),this_smartass); +} + +static void smartass_early_suspend(struct early_suspend *handler) { + int i; + if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 + return; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) { + int i; + if (!suspended) // already not suspended so nothing to do + return; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = { + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +#ifdef CONFIG_MACH_HERO + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +#endif +}; + +static int __init cpufreq_smartass_init(void) +{ + unsigned int i; + struct smartass_info_s *this_smartass; + debug_mask = 0; + up_rate_us = DEFAULT_UP_RATE_US; + down_rate_us = DEFAULT_DOWN_RATE_US; + sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ; + sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; + awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ; + sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; + ramp_up_step = DEFAULT_RAMP_UP_STEP; + ramp_down_step = DEFAULT_RAMP_DOWN_STEP; + max_cpu_load = DEFAULT_MAX_CPU_LOAD; + min_cpu_load = DEFAULT_MIN_CPU_LOAD; + + spin_lock_init(&cpumask_lock); + + suspended = 0; + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_smartass = &per_cpu(smartass_info, i); + this_smartass->enable = 0; + this_smartass->cur_policy = 0; + this_smartass->ramp_dir = 0; + this_smartass->time_in_idle = 0; + this_smartass->idle_exit_time = 0; + this_smartass->freq_change_time = 0; + this_smartass->freq_change_time_in_idle = 0; + this_smartass->cur_cpu_load = 0; + // intialize timer: + init_timer_deferrable(&this_smartass->timer); + this_smartass->timer.function = cpufreq_smartass_timer; + this_smartass->timer.data = i; + work_cpumask_test_and_clear(i); + } + + // Scale up is high priority + up_wq = alloc_workqueue("ksmartass_up", WQ_HIGHPRI, 1); + down_wq = alloc_workqueue("ksmartass_down", 0, 1); + if (!up_wq || !down_wq) + return -ENOMEM; + + INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); + + register_early_suspend(&smartass_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_smartass2); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 +fs_initcall(cpufreq_smartass_init); +#else +module_init(cpufreq_smartass_init); +#endif + +static void __exit cpufreq_smartass_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_smartass2); + destroy_workqueue(up_wq); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_smartass_exit); + +MODULE_AUTHOR ("Erasmux"); +MODULE_DESCRIPTION ("'cpufreq_smartass2' - A smart cpufreq governor"); +MODULE_LICENSE ("GPL"); diff --git a/drivers/input/touchscreen/atmel_i2c_rmi_QT602240.c b/drivers/input/touchscreen/atmel_i2c_rmi_QT602240.c index d6a9173f8..bbb8a64fc 100755 --- a/drivers/input/touchscreen/atmel_i2c_rmi_QT602240.c +++ b/drivers/input/touchscreen/atmel_i2c_rmi_QT602240.c @@ -114,12 +114,13 @@ and the height of the key region is 8.5mm, TS_Y_MAX * 8.5 /91.5 */ #define EXTRA_MAX_TOUCH_KEY 4 #define TS_KEY_DEBOUNCE_TIMER_MS 60 -static int vibrate=30; +static int vibrate=20; module_param(vibrate, int, 00644); void msm_timed_vibrate(int); + /* to define a region of touch panel */ 
typedef struct { @@ -322,8 +323,7 @@ static u8 atmel_timer = 0; #define DISABLE 0 /* < DTS2011062404739 cuiyu 20110624 begin */ -static uint32_t resume_time = 0; -static u8 cal_check_flag = 1; +static u8 cal_check_flag = 0; /* DTS2011062404739 cuiyu 20110624 end > */ /* DTS2010083103149 zhangtao 20100909 end > */ @@ -1061,7 +1061,7 @@ int write_multitouchscreen_config(u8 instance,int flag) *(tmp + 11) = 3; //movhysti /* < DTS2011042106137 zhangtao 20110509 begin */ /* make the point report every pix */ - *(tmp + 12) = 1; //movhystn + *(tmp + 12) = 3; //movhystn /* DTS2011042106137 zhangtao 20110509 end > */ *(tmp + 13) = 0;//0x2e; //movfilter *(tmp + 14) = 2; //numtouch @@ -1196,7 +1196,7 @@ int write_gripfacesuppression_config(u8 instance) /* < DTS2010083103149 zhangtao 20100909 begin */ /* < DTS2011042106137 zhangtao 20110509 begin */ /* turn off the fripfacesuppression */ - *(tmp + 0) = 0x00; //0x05; //ctrl + *(tmp + 0) = 0x07; //0x05; //ctrl /* DTS2011042106137 zhangtao 20110509 end > */ /* < DTS2010073101113 zhangtao 20100819 begin */ *(tmp + 1) = 0; //xlogrip @@ -1272,7 +1272,7 @@ int write_noisesuppression_config(u8 instance) *(tmp + 6) = 0xff; //GCAFLL *(tmp + 7) = 4; //actvgcafvalid /* < DTS2011062404739 cuiyu 20110624 begin */ - *(tmp + 8) = 30; //noisethr + *(tmp + 8) = 20; //noisethr /* DTS2011062404739 cuiyu 20110624 end > */ *(tmp + 9) = 0; //reserved *(tmp + 10) = 0; //freqhopscale @@ -1889,7 +1889,15 @@ void check_chip_calibration(void) /* Process counters and decide if cal was good or if we must re-calibrate. */ /* < DTS2011062404739 cuiyu 20110624 begin */ /* check error */ - if(atch_ch > 0) + if((tch_ch) && (atch_ch == 0)) + { + /* Calibration may be good */ + cal_maybe_good(); + TS_DEBUG_TS("the func cal_maybe_good is used!\n"); + } + /* CAL_THR is configurable. A starting value of 10 to 20 is suggested. + * * This can then be tuned for the particular design. */ + else if((tch_ch - 25) <= atch_ch && (tch_ch || atch_ch)) /* DTS2011062404739 cuiyu 20110624 end > */ { /* Calibration was bad - must recalibrate and check afterwards. */ @@ -1914,46 +1922,48 @@ void check_chip_calibration(void) } /* < DTS2011062404739 cuiyu 20110624 begin */ /* check point */ -static int check_too_many_point(int num_i, int *x_record) -{ - - while(num_i > 0) - { - if((x_record[num_i] >= x_record[0] - 2) && (x_record[num_i] <= x_record[0] + 2)) - { - num_i--; - continue; - } - else - { - return 1; // no too many point - } - } - return -1; -} /* DTS2011062404739 cuiyu 20110624 end > */ void cal_maybe_good(void) { int ret; - /* < DTS2011062404739 cuiyu 20110624 begin */ - uint8_t data = 1u; - /* shut down */ - if(cal_check_flag == 0) + /* Check if the timer is enabled */ + if(atmel_timer == ENABLE) { - /* shut down calibration */ - if(1 == write_acquisition_config(0, 0)) + TS_DEBUG_TS("cal_maybe_good: the current time is %lu\n", jiffies); + if((jiffies - timer_tick) /10 > 5) /* Check if the timer timedout of 0.5seconds */ + { + /* Cal was good - don't need to check any more */ + cal_check_flag = 0; + /* Disable the timer */ + atmel_timer = DISABLE; + timer_tick = 0; + /* Write back the normal acquisition config to chip. */ + if (1 == write_acquisition_config(0,0)) + { + /* "Acquisition config write failed!\n" */ + + printk("\n[ERROR] line : %d\n", __LINE__); + } + + ret = write_multitouchscreen_config(0,1); + + printk("the cal_maybe_good is ok! 
the ret is %d\n",ret); + } + else { - /* Acquisition config write failed!\n */ - TS_DEBUG_TS("\n[ERROR] line : %d\n", __LINE__); + cal_check_flag = 1u; + TS_DEBUG_TS("the time is not yet!\n"); } - ret = write_multitouchscreen_config(0, 1); - msleep(50); - ret = write_mem(command_processor_address + CALIBRATE_OFFSET, 1, &data); - TS_DEBUG_TS("the cal_maybe_good is ok! the ret is %d\n", ret); } - /* DTS2011062404739 cuiyu 20110624 end > */ + else + { + /* Timer not enabled, so enable it */ + atmel_timer = ENABLE; // enable for 100ms timer + timer_tick = jiffies; + cal_check_flag = 1u; + TS_DEBUG_TS("the cal_maybe_good is enable time!\n"); + } } -/* DTS2010083103149 zhangtao 20100909 end > */ /* < DTS2010062400225 zhangtao 20100624 begin */ static int atmel_ts_initchip(void) @@ -2076,15 +2086,6 @@ static void atmel_ts_work_func(struct work_struct *work) static char first_point_id = 1; static int point_1_x; static int point_1_y; - /* < DTS2011062404739 cuiyu 20110624 begin */ - static int first_in_point = 0; - static int point_1_x_first_down; - static int point_1_y_first_down; - static int num_1; - static int num_2; - static int x_record1[10]; - static int x_record2[5]; - /* DTS2011062404739 cuiyu 20110624 end > */ static int point_1_amplitude; static int point_1_width; static int point_2_x; @@ -2193,35 +2194,6 @@ static void atmel_ts_work_func(struct work_struct *work) point_1_y = ts->touch_y; point_1_amplitude = ts->touchamplitude; point_1_width = ts->sizeoftouch; - /* < DTS2011062404739 cuiyu 20110624 begin */ - /* record point */ - if((cal_check_flag != 0) && !(first_in_point)) - { - first_in_point = 1; - num_1 = 0; - point_1_x_first_down = point_1_x; - point_1_y_first_down = point_1_y; - } - - /* timeout or not */ - if(jiffies - resume_time < 6000) - { - x_record1[num_1] = point_1_x; - if(num_1 >= 9) - { - /* check point */ - if(check_too_many_point(num_1, x_record1) == -1) - { - cal_check_flag = 1; - } - num_1 = 0; - } - else - { - num_1++; - } - } - /* DTS2011062404739 cuiyu 20110624 end > */ } else { @@ -2230,46 +2202,12 @@ static void atmel_ts_work_func(struct work_struct *work) point_2_y = ts->touch_y; point_2_amplitude = ts->touchamplitude; point_2_width = ts->sizeoftouch; - /* < DTS2011062404739 cuiyu 20110624 begin */ - /* timeout or not */ - if(jiffies - resume_time < 6000) - { - x_record2[num_2] = point_2_x; - if(num_2 >= 4) - { - /* check point */ - if(check_too_many_point(num_2, x_record2) == -1) - { - cal_check_flag = 1; - } - num_2 = 0; - } - else - { - num_2++; - } - } - /* DTS2011062404739 cuiyu 20110624 end > */ } } else { if(1 == point_index) { - /* < DTS2011062404739 cuiyu 20110624 begin */ - if(cal_check_flag == 1 && (second_point_pressed == FALSE)) - { - if(((abs(ts->touch_x - point_1_x_first_down) > 100 || abs(ts->touch_y - point_1_y_first_down) > 100) - || jiffies - resume_time > 6000)) - { - /* it is all good */ - cal_maybe_good(); - cal_check_flag = 0; - } - first_in_point = 0; - } - /* DTS2011062404739 cuiyu 20110624 end > */ - /*if index-1 released, index-2 point remains working*/ first_point_id = 2; } @@ -2399,6 +2337,7 @@ static void atmel_ts_work_func(struct work_struct *work) input_report_key(ts->key_input, key_tmp, 0); key_pressed1 = 0; + msm_timed_vibrate(vibrate); ATMEL_DBG_MASK("when the key is released report!\n"); } } @@ -2407,7 +2346,6 @@ static void atmel_ts_work_func(struct work_struct *work) if(0 == key_pressed1) { input_report_key(ts->key_input, key_tmp, 1); - msm_timed_vibrate(vibrate); key_pressed1 = 1; ATMEL_DBG_MASK("the key is pressed 
report!\n"); } @@ -2457,7 +2395,7 @@ static void atmel_ts_work_func(struct work_struct *work) if (ts->test > 0) key_pressed = KEY_BRL_DOT1; else - key_pressed = KEY_SEARCH; + key_pressed = KEY_SEARCH; touch_input_report_key(ts, key_pressed, 1); input_sync(ts->input_dev); msm_timed_vibrate(vibrate); @@ -2502,8 +2440,8 @@ static void atmel_ts_work_func(struct work_struct *work) default: break; } - - + + break; /* < DTS2010083103149 zhangtao 20100909 begin */ case PROCG_GRIPFACESUPPRESSION_T20: @@ -2515,7 +2453,7 @@ static void atmel_ts_work_func(struct work_struct *work) break; /* DTS2010083103149 zhangtao 20100909 end > */ - + default: TS_DEBUG_TS("T%d detect\n", obj); break; @@ -2576,7 +2514,7 @@ static int atmel_ts_probe( goto err_power_on_failed; /* */ if (ret) goto err_power_on_failed; @@ -2948,7 +2886,8 @@ goto succeed_find_device; { /* < DTS2011052101089 shenjinming 20110521 begin */ /* can't use the flag ret here, it will change the return value of probe function */ - vreg_disable(v_gp4); + ret = vreg_disable(v_gp4); + printk(KERN_ERR "the atmel's power is off: gp4 = %d \n ", ret); /* delete a line */ /* DTS2011052101089 shenjinming 20110521 end > */ } @@ -3020,11 +2959,6 @@ static int atmel_ts_resume(struct i2c_client *client) write_power_config(1); /* < DTS2010083103149 zhangtao 20100909 begin */ calibrate_chip_error(); -/* DTS2010083103149 zhangtao 20100909 end > */ - /* < DTS2011062404739 cuiyu 20110624 begin */ - cal_check_flag = 1; - resume_time = jiffies; - /* DTS2011062404739 cuiyu 20110624 end > */ if (ts->use_irq) { enable_irq(client->irq); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 957c5b414..874922d13 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -363,6 +363,9 @@ extern struct cpufreq_governor cpufreq_gov_conservative; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) extern struct cpufreq_governor cpufreq_gov_interactive; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2) +extern struct cpufreq_governor cpufreq_gov_smartass2; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass2) #endif diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index d5428594f..a7937dbbd 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -75,8 +75,8 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" #/* < DTS2011052606009 jiaxianghong 20110527 begin */ #/* < DTS2011030103387 niguodong 20110415 begin */ - echo \#define LINUX_COMPILE_BY \"dzo\" - echo \#define LINUX_COMPILE_HOST \"martin\" + echo \#define LINUX_COMPILE_BY \"forumber\" + echo \#define LINUX_COMPILE_HOST \"dzo\" #/* DTS2011030103387 niguodong 20110415 end > */ #/* < DTS2011052606009 jiaxianghong 20110527 end */