From dfdbf3f6e2d279f2a46ed95614cb4bf07657394d Mon Sep 17 00:00:00 2001
From: Ian Wisbon
Date: Tue, 15 Feb 2011 15:53:51 -0500
Subject: Digi del-5.6 Complete

---
 arch/arm/mach-mx23/clock.c | 962 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 744 insertions(+), 218 deletions(-)

diff --git a/arch/arm/mach-mx23/clock.c b/arch/arm/mach-mx23/clock.c
index 957a70213399..9e18dbc74337 100644
--- a/arch/arm/mach-mx23/clock.c
+++ b/arch/arm/mach-mx23/clock.c
@@ -18,10 +18,12 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
@@ -29,17 +31,130 @@
 #include "regs-clkctrl.h"
 #include "regs-digctl.h"
+#include
 #include
 
 #define CLKCTRL_BASE_ADDR IO_ADDRESS(CLKCTRL_PHYS_ADDR)
 #define DIGCTRL_BASE_ADDR IO_ADDRESS(DIGCTL_PHYS_ADDR)
+#define RTC_BASE_ADDR IO_ADDRESS(RTC_PHYS_ADDR)
+
+/* These are the maximum clock speeds that have been
+ * validated to run at the minimum VddD target voltage level for cpu operation
+ * (presently 1.05V target, .975V brownout).  Higher clock speeds for GPMI and
+ * SSP have not been validated.
+ */
+#define PLL_ENABLED_MAX_CLK_SSP		96000000
+#define PLL_ENABLED_MAX_CLK_GPMI	96000000
+
 /* external clock input */
-static struct clk xtal_clk[];
-static unsigned long xtal_clk_rate[3] = { 24000000, 24000000, 32000 };
+static struct clk pll_clk;
+static struct clk ref_xtal_clk;
+
+#ifdef DEBUG
+static void print_ref_counts(void);
+#endif
 
 static unsigned long enet_mii_phy_rate;
 
+static inline int clk_is_busy(struct clk *clk)
+{
+	if ((clk->parent == &ref_xtal_clk) && (clk->xtal_busy_bits))
+		return __raw_readl(clk->busy_reg) & (1 << clk->xtal_busy_bits);
+	else if (clk->busy_bits && clk->busy_reg)
+		return __raw_readl(clk->busy_reg) & (1 << clk->busy_bits);
+	else {
+		printk(KERN_ERR "WARNING: clock has no assigned busy \
+			register or bits\n");
+		udelay(10);
+		return 0;
+	}
+}
+
+static inline int clk_busy_wait(struct clk *clk)
+{
+	int i;
+
+	for (i = 10000000; i; i--)
+		if (!clk_is_busy(clk))
+			break;
+	if (!i)
+		return -ETIMEDOUT;
+	else
+		return 0;
+}
+
+static bool mx23_enable_h_autoslow(bool enable)
+{
+	bool currently_enabled;
+
+	if (__raw_readl(CLKCTRL_BASE_ADDR+HW_CLKCTRL_HBUS) &
+		BM_CLKCTRL_HBUS_AUTO_SLOW_MODE)
+		currently_enabled = true;
+	else
+		currently_enabled = false;
+
+	if (enable)
+		__raw_writel(BM_CLKCTRL_HBUS_AUTO_SLOW_MODE,
+			CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_SET);
+	else
+		__raw_writel(BM_CLKCTRL_HBUS_AUTO_SLOW_MODE,
+			CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
+	return currently_enabled;
+}
+
+
+static void mx23_set_hbus_autoslow_flags(u16 mask)
+{
+	u32 reg;
+
+	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
+	reg &= 0xFFFF;
+	reg |= mask << 16;
+	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
+}
+
+static void local_clk_disable(struct clk *clk)
+{
+	if (clk == NULL || IS_ERR(clk) || !clk->ref)
+		return;
+
+	if ((--clk->ref) & CLK_EN_MASK)
+		return;
+
+	if (clk->disable)
+		clk->disable(clk);
+	local_clk_disable(clk->secondary);
+	local_clk_disable(clk->parent);
+}
+
+static int local_clk_enable(struct clk *clk)
+{
+	if (clk == NULL || IS_ERR(clk))
+		return -EINVAL;
+
+	if ((clk->ref++) & CLK_EN_MASK)
+		return 0;
+	if (clk->parent)
+		local_clk_enable(clk->parent);
+	if (clk->secondary)
+		local_clk_enable(clk->secondary);
+	if (clk->enable)
+		clk->enable(clk);
+	return 0;
+}
+
+
+static bool mx23_is_clk_enabled(struct clk *clk)
+{
+	if (clk->enable_reg)
+		return (__raw_readl(clk->enable_reg) &
+			clk->enable_bits) ?
+			0 : 1;
+	else
+		return (clk->ref & CLK_EN_MASK) ? 1 : 0;
+}
+
+
 static int mx23_raw_enable(struct clk *clk)
 {
 	unsigned int reg;
@@ -48,6 +163,9 @@ static int mx23_raw_enable(struct clk *clk)
 		reg &= ~clk->enable_bits;
 		__raw_writel(reg, clk->enable_reg);
 	}
+	if (clk->busy_reg)
+		clk_busy_wait(clk);
+
 	return 0;
 }
 
@@ -61,29 +179,14 @@ static void mx23_raw_disable(struct clk *clk)
 	}
 }
 
-static unsigned long xtal_get_rate(struct clk *clk)
+static unsigned long ref_xtal_get_rate(struct clk *clk)
 {
-	int id = clk - xtal_clk;
-	return xtal_clk_rate[id];
+	return 24000000;
 }
 
-static struct clk xtal_clk[] = {
-	{
-		.flags = RATE_FIXED,
-		.get_rate = xtal_get_rate,
-	},
-	{
-		.flags = RATE_FIXED,
-		.get_rate = xtal_get_rate,
-	},
-	{
-		.flags = RATE_FIXED,
-		.get_rate = xtal_get_rate,
-	},
-};
-
 static struct clk ref_xtal_clk = {
-	.parent = &xtal_clk[0],
+	.flags = RATE_FIXED,
+	.get_rate = ref_xtal_get_rate,
 };
 
 static unsigned long pll_get_rate(struct clk *clk);
@@ -107,20 +210,23 @@ static unsigned long pll_get_rate(struct clk *clk)
 
 static int pll_enable(struct clk *clk)
 {
-	int timeout = 100;
-	unsigned long reg;
+	u32 reg;
+
+	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0);
+
+	if ((reg & BM_CLKCTRL_PLLCTRL0_POWER) &&
+		(reg & BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS))
+		return 0;
 
 	__raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
 		BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
 		CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_SET);
-	do {
-		udelay(10);
-		reg = __raw_readl(CLKCTRL_BASE_ADDR +
-				HW_CLKCTRL_PLLCTRL1);
-		timeout--;
-	} while ((timeout > 0) && !(reg & BM_CLKCTRL_PLLCTRL1_LOCK));
-	if (timeout <= 0)
-		return -EFAULT;
+	/* only a 10us delay is needed.  The PLLCTRL1 LOCK bit field is only
+	 * a timer and is incorrect (excessive).  Per the definition of the
+	 * PLLCTRL0 POWER field, we wait at least 10us.
+	 */
+	udelay(10);
+
 	return 0;
 }
 
@@ -171,6 +277,8 @@ static unsigned long ref_cpu_get_rate(struct clk *clk)
 
 static struct clk ref_cpu_clk = {
 	.parent = &pll_clk,
+	.enable = mx23_raw_enable,
+	.disable = mx23_raw_disable,
 	.get_rate = ref_cpu_get_rate,
 	.round_rate = ref_clk_round_rate,
 	.set_rate = ref_clk_set_rate,
@@ -178,6 +286,8 @@ static struct clk ref_cpu_clk = {
 	.enable_bits = BM_CLKCTRL_FRAC_CLKGATECPU,
 	.scale_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC,
 	.scale_bits = BP_CLKCTRL_FRAC_CPUFRAC,
+	.busy_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU,
+	.busy_bits = 28,
 };
 
 static unsigned long ref_emi_get_rate(struct clk *clk)
@@ -191,6 +301,8 @@ static unsigned long ref_emi_get_rate(struct clk *clk)
 
 static struct clk ref_emi_clk = {
 	.parent = &pll_clk,
+	.enable = mx23_raw_enable,
+	.disable = mx23_raw_disable,
 	.get_rate = ref_emi_get_rate,
 	.set_rate = ref_clk_set_rate,
 	.round_rate = ref_clk_round_rate,
@@ -202,10 +314,12 @@ static struct clk ref_emi_clk = {
 static unsigned long ref_io_get_rate(struct clk *clk);
 
 static struct clk ref_io_clk = {
-	.parent = &pll_clk,
-	.get_rate = ref_io_get_rate,
-	.enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC,
-	.enable_bits = BM_CLKCTRL_FRAC_CLKGATEIO,
+	.parent = &pll_clk,
+	.enable = mx23_raw_enable,
+	.disable = mx23_raw_disable,
+	.get_rate = ref_io_get_rate,
+	.enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC,
+	.enable_bits = BM_CLKCTRL_FRAC_CLKGATEIO,
 };
 
 static unsigned long ref_io_get_rate(struct clk *clk)
@@ -229,6 +343,8 @@ static unsigned long ref_pix_get_rate(struct clk *clk)
 
 static struct clk ref_pix_clk = {
 	.parent = &pll_clk,
+	.enable = mx23_raw_enable,
+	.disable = mx23_raw_disable,
 	.get_rate = ref_pix_get_rate,
 	.enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC,
 	.enable_bits = BM_CLKCTRL_FRAC_CLKGATEPIX,
@@ -237,63 +353,20 @@ static struct clk ref_pix_clk = {
 static struct clk cpu_clk, h_clk;
 static int clkseq_set_parent(struct clk *clk, struct clk *parent)
 {
-	int ret = -EINVAL;
-	int shift = 8;
+	int shift;
 
+	if (clk->parent == parent)
+		return 0; /* clock parent already at target.  nothing to do */
 	/* bypass? */
 	if (parent == &ref_xtal_clk)
 		shift = 4;
+	else
+		shift = 8;
 
-	if (clk->bypass_reg) {
-		u32 hbus_val, cpu_val;
-
-		if (clk == &cpu_clk && shift == 4) {
-			hbus_val = __raw_readl(CLKCTRL_BASE_ADDR +
-				HW_CLKCTRL_HBUS);
-			cpu_val = __raw_readl(CLKCTRL_BASE_ADDR +
-				HW_CLKCTRL_CPU);
-
-			hbus_val &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN |
-				BM_CLKCTRL_HBUS_DIV);
-			hbus_val |= 1;
-
-			cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
-			cpu_val |= 1;
-
-			__raw_writel(1 << clk->bypass_bits,
-				clk->bypass_reg + shift);
-
-			__raw_writel(hbus_val,
-				CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-			__raw_writel(cpu_val,
-				CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
-			/* h_clk.rate = 0; */
-		} else if (clk == &cpu_clk && shift == 8) {
-			hbus_val = __raw_readl(CLKCTRL_BASE_ADDR +
-				HW_CLKCTRL_HBUS);
-			cpu_val = __raw_readl(CLKCTRL_BASE_ADDR +
-				HW_CLKCTRL_CPU);
-			hbus_val &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN |
-				BM_CLKCTRL_HBUS_DIV);
-			hbus_val |= 2;
-			cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
-			cpu_val |= 2;
-
-			__raw_writel(hbus_val,
-				CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-			__raw_writel(cpu_val,
-				CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
-			/* h_clk.rate = 0; */
+	if (clk->bypass_reg)
+		__raw_writel(1 << clk->bypass_bits, clk->bypass_reg + shift);
 
-			__raw_writel(1 << clk->bypass_bits,
-				clk->bypass_reg + shift);
-		} else
-			__raw_writel(1 << clk->bypass_bits,
-				clk->bypass_reg + shift);
-		ret = 0;
-	}
-
-	return ret;
+	return 0;
 }
 
 static unsigned long lcdif_get_rate(struct clk *clk)
@@ -336,6 +409,8 @@ static int lcdif_set_rate(struct clk *clk, unsigned long rate)
 	ns_cycle *= 2; /* Fix calculate double frequency */
+
+
 	for (div = 1; div < 256; ++div) {
 		u32 fracdiv;
 		u32 ps_result;
@@ -394,16 +469,9 @@ static int lcdif_set_rate(struct clk *clk, unsigned long rate)
 	__raw_writel(reg_val, clk->scale_reg);
 
 	/* Wait for divider update */
-	if (clk->busy_reg) {
-		int i;
-		for (i = 10000; i; i--)
-			if (!clk_is_busy(clk))
-				break;
-		if (!i) {
-			ret = -ETIMEDOUT;
-			goto out;
-		}
-	}
+	ret = clk_busy_wait(clk);
+	if (ret)
+		goto out;
 
 	/* Switch to ref_pix source */
 	reg_val = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
@@ -414,6 +482,14 @@ out:
 	return ret;
 }
 
+/*
+ * We set lcdif_clk's parent as &pll_clk deliberately, although in the
+ * IC spec lcdif_clk (CLK_PIX) is derived from ref_pix, which in turn
+ * is derived from the PLL.  By doing so, users just need to set/get the
+ * clock rate for lcdif_clk, without needing to take care of ref_pix,
+ * because the clock driver will automatically calculate the fracdivider
+ * for HW_CLKCTRL_FRAC and the divider for HW_CLKCTRL_PIX conjointly.
+ */
 static struct clk lcdif_clk = {
 	.parent = &pll_clk,
 	.enable = mx23_raw_enable,
@@ -464,20 +540,77 @@ static unsigned long cpu_round_rate(struct clk *clk, unsigned long rate)
 
 static int cpu_set_rate(struct clk *clk, unsigned long rate)
 {
-	unsigned long root_rate =
-		clk->parent->parent->get_rate(clk->parent->parent);
-	int i;
+	unsigned long root_rate = pll_clk.get_rate(&pll_clk);
+	int ret = -EINVAL;
 	u32 clkctrl_cpu = 1;
 	u32 c = clkctrl_cpu;
 	u32 clkctrl_frac = 1;
 	u32 val;
-	u32 reg_val;
+	u32 reg_val, hclk_reg;
+	bool h_autoslow;
 
-	if (rate < 24000000)
+	/* make sure the cpu div_xtal is 1 */
+	reg_val = __raw_readl(CLKCTRL_BASE_ADDR+HW_CLKCTRL_CPU);
+	reg_val &= ~(BM_CLKCTRL_CPU_DIV_XTAL);
+	reg_val |= (1 << BP_CLKCTRL_CPU_DIV_XTAL);
+	__raw_writel(reg_val, CLKCTRL_BASE_ADDR+HW_CLKCTRL_CPU);
+
+	if (rate < ref_xtal_get_rate(&ref_xtal_clk))
 		return -EINVAL;
-	else if (rate == 24000000) {
+
+	if (rate == clk_get_rate(clk))
+		return 0;
+	/* temporarily disable h autoslow to avoid
+	 * hclk getting too slow while temporarily
+	 * changing clocks
+	 */
+	h_autoslow = mx23_enable_h_autoslow(false);
+
+	if (rate == ref_xtal_get_rate(&ref_xtal_clk)) {
+		/* switch to the 24M source */
 		clk_set_parent(clk, &ref_xtal_clk);
+
+		/* to avoid bus starvation issues, we'll go ahead
+		 * and change the hbus clock divider to 1 now.  Cpufreq
+		 * or other clock management can lower it later if
+		 * desired for power savings or other reasons, but
+		 * there should be no need to with hbus autoslow
+		 * functionality enabled.
+		 */
+
+		ret = clk_busy_wait(&cpu_clk);
+		if (ret) {
+			printk(KERN_ERR "* couldn't set\
+				up CPU divisor\n");
			return ret;
+		}
+
+		ret = clk_busy_wait(&h_clk);
+		if (ret) {
+			printk(KERN_ERR "* H_CLK busy timeout\n");
+			return ret;
+		}
+
+		hclk_reg = __raw_readl(CLKCTRL_BASE_ADDR+HW_CLKCTRL_HBUS);
+		hclk_reg &= ~(BM_CLKCTRL_HBUS_DIV);
+		hclk_reg |= (1 << BP_CLKCTRL_HBUS_DIV);
+
+		__raw_writel(hclk_reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_SET);
+
+		ret = clk_busy_wait(&cpu_clk);
+		if (ret) {
+			printk(KERN_ERR "** couldn't set\
+				up CPU divisor\n");
+			return ret;
+		}
+
+		ret = clk_busy_wait(&h_clk);
+		if (ret) {
+			printk(KERN_ERR "** CLK busy timeout\n");
+			return ret;
+		}
+	} else {
 		for ( ; c < 0x40; c++) {
 			u32 f = ((root_rate/1000)*18/c + (rate/1000)/2) /
@@ -502,33 +635,116 @@ static int cpu_set_rate(struct clk *clk, unsigned long rate)
 		if ((abs(d) > 100) || (clkctrl_frac < 18) ||
 			(clkctrl_frac > 35))
 			return -EINVAL;
-	}
+	}
 
-	/* Set Frac div */
+	/* prepare Frac div */
 	val = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-	val &= ~(BM_CLKCTRL_FRAC_CPUFRAC << BP_CLKCTRL_FRAC_CPUFRAC);
-	val |= clkctrl_frac;
-	__raw_writel(val, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-	/* Do not gate */
-	__raw_writel(BM_CLKCTRL_FRAC_CLKGATECPU, CLKCTRL_BASE_ADDR +
-		HW_CLKCTRL_FRAC_CLR);
+	val &= ~(BM_CLKCTRL_FRAC_CPUFRAC);
+	val |= (clkctrl_frac << BP_CLKCTRL_FRAC_CPUFRAC);
 
-	/* write clkctrl_cpu */
+	/* prepare clkctrl_cpu div */
 	reg_val = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
 	reg_val &= ~0x3F;
 	reg_val |= clkctrl_cpu;
 
+	/* set a safe hbus clock divider.  A divider of 3 ensures that
+	 * the Vddd voltage required for the cpuclk is sufficiently
+	 * high for the hbus clock, and under 24MHz cpuclk conditions
+	 * a divider of at least 3 ensures hbusclk doesn't remain
+	 * unnecessarily low, which hurts performance
+	 */
+	hclk_reg = __raw_readl(CLKCTRL_BASE_ADDR+HW_CLKCTRL_HBUS);
+	hclk_reg &= ~(BM_CLKCTRL_HBUS_DIV);
+	hclk_reg |= (3 << BP_CLKCTRL_HBUS_DIV);
+
+	/* if the pll was OFF, we need to turn it ON.
+	 * Even if it was ON, we want to temporarily
+	 * increment it by 1 to avoid turning it off
+	 * in the upcoming parent clock change to xtal.  This
+	 * avoids waiting an extra 10us for every cpu clock
+	 * change between ref_cpu sourced frequencies.
+	 */
+	pll_enable(&pll_clk);
+	pll_clk.ref++;
+
+	/* switch to the XTAL CLK source temporarily while
+	 * we manipulate the ref_cpu frequency */
+	clk_set_parent(clk, &ref_xtal_clk);
+
+	ret = clk_busy_wait(&h_clk);
+
+	if (ret) {
+		printk(KERN_ERR "-* HCLK busy wait timeout\n");
+		return ret;
+	}
+
+	ret = clk_busy_wait(clk);
+
+	if (ret) {
+		printk(KERN_ERR "-* couldn't set\
+			up CPU divisor\n");
+		return ret;
+	}
+
+	__raw_writel(val, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
+
+	/* clear the gate */
+	__raw_writel(BM_CLKCTRL_FRAC_CLKGATECPU, CLKCTRL_BASE_ADDR +
+		HW_CLKCTRL_FRAC_CLR);
+
+	/* set the ref_cpu integer divider */
 	__raw_writel(reg_val, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
 
-	for (i = 10000; i; i--)
-		if (!clk_is_busy(clk))
-			break;
-	if (!i) {
-		printk(KERN_ERR "couldn't set up CPU divisor\n");
-		return -ETIMEDOUT;
+	/* wait for the ref_cpu path to become stable before
+	 * switching over to it
+	 */
+
+	ret = clk_busy_wait(&ref_cpu_clk);
+
+	if (ret) {
+		printk(KERN_ERR "-** couldn't set\
+			up CPU divisor\n");
+		return ret;
 	}
+
+	/* change the hclk divider to a safe value for any ref_cpu
+	 * value.
+	 */
+	__raw_writel(hclk_reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
+
+	ret = clk_busy_wait(&h_clk);
+
+	if (ret) {
+		printk(KERN_ERR "-** HCLK busy wait timeout\n");
+		return ret;
+	}
+
+	clk_set_parent(clk, &ref_cpu_clk);
+
+	/* decrement the pll_clk ref count because
+	 * we temporarily enabled/incremented the count
+	 * above.
+	 */
+	pll_clk.ref--;
+
+	ret = clk_busy_wait(&cpu_clk);
+
+	if (ret) {
+		printk(KERN_ERR "-*** Couldn't set\
+			up CPU divisor\n");
+		return ret;
+	}
+
+	ret = clk_busy_wait(&h_clk);
+
+	if (ret) {
+		printk(KERN_ERR "-*** HCLK busy wait timeout\n");
+		return ret;
+	}
+	}
 
-	return 0;
+	mx23_enable_h_autoslow(h_autoslow);
+	return ret;
 }
 
 static struct clk cpu_clk = {
@@ -543,6 +759,7 @@ static struct clk cpu_clk = {
 	.bypass_bits = 7,
 	.busy_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU,
 	.busy_bits = 28,
+	.xtal_busy_bits = 29,
 };
 
 static unsigned long uart_get_rate(struct clk *clk)
@@ -598,25 +815,99 @@ static unsigned long x_get_rate(struct clk *clk)
 	return clk->parent->get_rate(clk->parent) / reg;
 }
 
+static unsigned long x_round_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned int root_rate, frac_rate;
+	unsigned int div;
+	root_rate = clk->parent->get_rate(clk->parent);
+	frac_rate = root_rate % rate;
+	div = root_rate / rate;
+	/* while the reference manual specifies that divider
+	 * values up to 1023 are allowed, other critical SoC components
+	 * require higher x clock values at all times.  Through
+	 * limited testing, the lradc functionality to measure
+	 * the battery voltage and copy this value to the
+	 * power supply requires at least a 64kHz xclk.
+	 * So the divider will be limited to 375.
+	 */
+	if ((div == 0) || (div > 375))
+		return root_rate;
+	if (frac_rate == 0)
+		return rate;
+	else
+		return root_rate / (div + 1);
+}
+
+static int x_set_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long root_rate;
+	unsigned long round_rate;
+	unsigned int reg, div;
+	root_rate = clk->parent->get_rate(clk->parent);
+
+	if ((!clk->round_rate) || !(clk->scale_reg))
+		return -EINVAL;
+
+	round_rate = clk->round_rate(clk, rate);
+	div = root_rate / round_rate;
+
+	if (root_rate % round_rate)
+		return -EINVAL;
+
+	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
+	reg &= ~(BM_CLKCTRL_XBUS_DIV_FRAC_EN | BM_CLKCTRL_XBUS_DIV);
+	reg |= BF_CLKCTRL_XBUS_DIV(div);
+	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
+
+	return clk_busy_wait(clk);
+
+}
+
 static struct clk x_clk = {
 	.parent = &ref_xtal_clk,
 	.get_rate = x_get_rate,
+	.set_rate = x_set_rate,
+	.round_rate = x_round_rate,
+	.scale_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS,
+	.busy_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS,
+	.busy_bits = 31,
 };
 
+
+
 static struct clk ana_clk = {
 	.parent = &ref_xtal_clk,
 };
 
-static unsigned long rtc_get_rate(struct clk *clk)
+
+
+static unsigned long xtal_clock32k_get_rate(struct clk *clk)
 {
-	if (clk->parent == &xtal_clk[2])
-		return clk->parent->get_rate(clk->parent);
-	return clk->parent->get_rate(clk->parent) / 768;
+	if (__raw_readl(RTC_BASE_ADDR + HW_RTC_PERSISTENT0) &
+		BM_RTC_PERSISTENT0_XTAL32_FREQ)
+		return 32000;
+	else
+		return 32768;
 }
 
-static struct clk rtc_clk = {
-	.parent = &ref_xtal_clk,
-	.get_rate = rtc_get_rate,
+static struct clk xtal_clock32k_clk = {
+	.get_rate = xtal_clock32k_get_rate,
+};
+
+static unsigned long rtc32k_get_rate(struct clk *clk)
+{
+	if (clk->parent == &ref_xtal_clk)
+		/* the mx23 reference manual has an error here:
+		 * the fixed divider is 750, not 768
+		 */
+		return clk->parent->get_rate(clk->parent) / 750;
+	else
+		return xtal_clock32k_get_rate(clk);
+}
+
+static struct clk rtc32k_clk = {
+	.parent = &xtal_clock32k_clk,
+	.get_rate = rtc32k_get_rate,
 };
 
 static unsigned long h_get_rate(struct clk *clk)
@@ -656,23 +947,14 @@ static int h_set_rate(struct clk *clk, unsigned long rate)
 	if (root_rate % round_rate)
 		return -EINVAL;
 
-	if ((root_rate < rate) && (root_rate == 64000000))
-		div = 3;
-
 	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
 	reg &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN | BM_CLKCTRL_HBUS_DIV);
 	reg |= BF_CLKCTRL_HBUS_DIV(div);
 	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
 
-	if (clk->busy_reg) {
-		int i;
-		for (i = 10000; i; i--)
-			if (!clk_is_busy(clk))
-				break;
-		if (!i) {
-			printk(KERN_ERR "couldn't set up AHB divisor\n");
-			return -ETIMEDOUT;
-		}
+	if (clk_busy_wait(clk)) {
+		printk(KERN_ERR "couldn't set up AHB divisor\n");
+		return -EINVAL;
 	}
 
 	return 0;
@@ -720,29 +1002,39 @@ static unsigned long emi_round_rate(struct clk *clk, unsigned long rate)
 	return root_rate / div;
 }
 
+/* when changing the emi clock, dram access must be
+ * disabled.  Special handling is needed to perform
+ * the emi clock change without touching sdram.
+ */
 static int emi_set_rate(struct clk *clk, unsigned long rate)
 {
 	int ret = 0;
-	if (rate < 24000)
+	struct mxs_emi_scaling_data sc_data;
+
+	unsigned long clkctrl_emi;
+	unsigned long clkctrl_frac;
+	int div = 1;
+	unsigned long root_rate, cur_emi_div, cur_emi_frac;
+	struct clk *target_parent_p = &ref_xtal_clk;
+
+	if (rate < ref_xtal_get_rate(&ref_xtal_clk))
 		return -EINVAL;
-	else {
-		int i;
-		struct mxs_emi_scaling_data sc_data;
-		int (*scale)(struct mxs_emi_scaling_data *) =
-			(void *)(MX23_OCRAM_BASE + 0x1000);
-		void *saved_ocram;
-		unsigned long clkctrl_emi;
-		unsigned long clkctrl_frac;
-		int div = 1;
-		unsigned long root_rate =
-			clk->parent->parent->get_rate(clk->parent->parent);
-		/*
-		 * We've been setting div to HW_CLKCTRL_CPU_RD() & 0x3f so far.
-		 * TODO: verify 1 is still valid.
-		 */
-		if (!mxs_ram_funcs_sz)
-			goto out;
+
+	if (!mxs_ram_funcs_sz)
+		goto out;
+
+	sc_data.cur_freq = (clk->get_rate(clk)) / 1000 / 1000;
+	sc_data.new_freq = rate / 1000 / 1000;
+
+	if (sc_data.cur_freq == sc_data.new_freq)
+		goto out;
+
+	if (rate != ref_xtal_get_rate(&ref_xtal_clk)) {
+		target_parent_p = &ref_emi_clk;
+		pll_enable(&pll_clk);
+
+		root_rate = pll_clk.get_rate(&pll_clk);
 
 		for (clkctrl_emi = div; clkctrl_emi < 0x3f;
				clkctrl_emi += div) {
@@ -764,37 +1056,62 @@ static int emi_set_rate(struct clk *clk, unsigned long rate)
 		pr_debug("%s: clkctrl_emi %ld, clkctrl_frac %ld\n",
			__func__, clkctrl_emi, clkctrl_frac);
-		saved_ocram = kmalloc(mxs_ram_funcs_sz, GFP_KERNEL);
-		if (!saved_ocram)
-			return -ENOMEM;
-		memcpy(saved_ocram, scale, mxs_ram_funcs_sz);
-		memcpy(scale, mxs_ram_freq_scale, mxs_ram_funcs_sz);
-
 		sc_data.emi_div = clkctrl_emi;
 		sc_data.frac_div = clkctrl_frac;
-		sc_data.cur_freq = (clk->get_rate(clk)) / 1000 / 1000;
-		sc_data.new_freq = rate / 1000 / 1000;
+	}
+
+
+	cur_emi_div = ((__raw_readl(CLKCTRL_BASE_ADDR+HW_CLKCTRL_EMI) &
		BM_CLKCTRL_EMI_DIV_EMI) >> BP_CLKCTRL_EMI_DIV_EMI);
+	cur_emi_frac = ((__raw_readl(CLKCTRL_BASE_ADDR+HW_CLKCTRL_FRAC) &
		BM_CLKCTRL_EMI_DIV_EMI) >> BP_CLKCTRL_FRAC_EMIFRAC);
+
+	if ((cur_emi_div == sc_data.emi_div) &&
		(cur_emi_frac == sc_data.frac_div))
+		goto out;
+	{
+		unsigned long iram_phy;
+		bool h_autoslow;
+		int (*scale)(struct mxs_emi_scaling_data *) =
			iram_alloc(mxs_ram_funcs_sz, &iram_phy);
+
+		if (NULL == scale) {
+			pr_err("%s Not enough iram\n", __func__);
+			return -ENOMEM;
+		}
+
+		/* temporarily disable h autoslow to maximize
		 * performance/minimize time spent with no
		 * sdram access
		 */
+		h_autoslow = mx23_enable_h_autoslow(false);
+
+		memcpy(scale, mxs_ram_freq_scale, mxs_ram_funcs_sz);
 
 		local_irq_disable();
 		local_fiq_disable();
 
 		scale(&sc_data);
 
+		iram_free(iram_phy, mxs_ram_funcs_sz);
+
 		local_fiq_enable();
 		local_irq_enable();
 
-		for (i = 10000; i; i--)
-			if (!clk_is_busy(clk))
-				break;
-		memcpy(scale, saved_ocram, mxs_ram_funcs_sz);
-		kfree(saved_ocram);
-
-		if (!i) {
-			printk(KERN_ERR "couldn't set up EMI divisor\n");
-			ret = -ETIMEDOUT;
-			goto out;
-		}
+		/* restore the previous h autoslow setting now that
		 * the emi clock change is complete
		 */
+		mx23_enable_h_autoslow(h_autoslow);
 	}
+
+	/* this clk_set_parent() call only keeps the ref counts straight
	 * and disables the previous parent if necessary;
	 * the actual clkseq changes have already
	 * been made.
+	 */
+	clk_set_parent(clk, target_parent_p);
+
 out:
 	return ret;
 }
 
@@ -812,8 +1129,9 @@ static struct clk emi_clk = {
 	.scale_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC,
 	.busy_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_EMI,
 	.busy_bits = 28,
+	.xtal_busy_bits = 29,
 	.bypass_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ,
-	.bypass_bits = 7,
+	.bypass_bits = 6,
 };
 
 static unsigned long ssp_get_rate(struct clk *clk);
@@ -821,37 +1139,40 @@ static unsigned long ssp_get_rate(struct clk *clk);
 static int ssp_set_rate(struct clk *clk, unsigned long rate)
 {
 	int ret = -EINVAL;
-	int div = (clk_get_rate(clk->parent) + rate - 1) / rate;
-	u32 reg_frac;
-	const int mask = 0x1FF;
-	int try = 10;
-	int i = -1;
+	u32 reg, div;
+	bool is_clk_enable;
 
-	if (div == 0 || div > mask)
-		goto out;
+	is_clk_enable = mx23_is_clk_enabled(clk);
+	if (!is_clk_enable)
+		local_clk_enable(clk);
 
-	reg_frac = __raw_readl(clk->scale_reg);
-	reg_frac &= ~(mask << clk->scale_bits);
+	/* if the desired clock can be sourced from ref_xtal,
	 * use ref_xtal to save power
	 */
+	if ((rate <= ref_xtal_get_rate(&ref_xtal_clk)) &&
		((ref_xtal_get_rate(&ref_xtal_clk) % rate) == 0))
+		clk_set_parent(clk, &ref_xtal_clk);
+	else
+		clk_set_parent(clk, &ref_io_clk);
 
-	while (try--) {
-		__raw_writel(reg_frac | (div << clk->scale_bits),
-				clk->scale_reg);
+	if (rate > PLL_ENABLED_MAX_CLK_SSP)
+		rate = PLL_ENABLED_MAX_CLK_SSP;
 
-		if (clk->busy_reg) {
-			for (i = 10000; i; i--)
-				if (!clk_is_busy(clk))
-					break;
-		}
-		if (i)
-			break;
-	}
+	div = (clk_get_rate(clk->parent) + rate - 1) / rate;
 
-	if (!i)
-		ret = -ETIMEDOUT;
-	else
-		ret = 0;
+	if (div == 0 || div > BM_CLKCTRL_SSP_DIV)
+		goto out;
+
+	reg = __raw_readl(clk->scale_reg);
+	reg &= ~(BM_CLKCTRL_SSP_DIV | BM_CLKCTRL_SSP_DIV_FRAC_EN);
+	reg |= div << clk->scale_bits;
+	__raw_writel(reg, clk->scale_reg);
+	ret = clk_busy_wait(clk);
 
 out:
+	if (!is_clk_enable)
+		local_clk_disable(clk);
+
 	if (ret != 0)
 		printk(KERN_ERR "%s: error %d\n", __func__, ret);
 	return ret;
@@ -877,6 +1198,26 @@ static int ssp_set_parent(struct clk *clk, struct clk *parent)
 	return ret;
 }
 
+/* handle peripheral clocks whose optimal parent depends on
+ * system parameters such as the cpu_clk rate.  For now, this optimization
+ * is only applied while the peripheral clock is not in use, to avoid
+ * handling more complex system clock coordination issues.
+ */
+static int ssp_set_sys_dependent_parent(struct clk *clk)
+{
+	if ((clk->ref & CLK_EN_MASK) == 0) {
+		if (clk_get_rate(&cpu_clk) > ref_xtal_get_rate(&ref_xtal_clk)) {
+			clk_set_parent(clk, &ref_io_clk);
+			clk_set_rate(clk, PLL_ENABLED_MAX_CLK_SSP);
+		} else {
+			clk_set_parent(clk, &ref_xtal_clk);
+			clk_set_rate(clk, ref_xtal_get_rate(&ref_xtal_clk));
+		}
+	}
+
+	return 0;
+}
+
 static struct clk ssp_clk = {
 	.parent = &ref_io_clk,
 	.get_rate = ssp_get_rate,
@@ -889,9 +1230,10 @@ static struct clk ssp_clk = {
 	.scale_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP,
 	.scale_bits = 0,
 	.bypass_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ,
-	.bypass_bits = 3,
+	.bypass_bits = 5,
 	.set_rate = ssp_set_rate,
 	.set_parent = ssp_set_parent,
+	.set_sys_dependent_parent = ssp_set_sys_dependent_parent,
 };
 
 static unsigned long ssp_get_rate(struct clk *clk)
@@ -903,6 +1245,123 @@ static unsigned long ssp_get_rate(struct clk *clk)
 	return clk->parent->get_rate(clk->parent) / reg;
 }
 
+static unsigned long gpmi_get_rate(struct clk *clk)
+{
+	unsigned int reg;
+	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI) &
		BM_CLKCTRL_GPMI_DIV;
+
+	return clk->parent->get_rate(clk->parent) / reg;
+}
+
+static int gpmi_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = -EINVAL;
+	u32 reg, div;
+
+	/* Make absolutely certain the clock is enabled. */
+	local_clk_enable(clk);
+
+	/* if the desired clock can be sourced from ref_xtal,
	 * use ref_xtal to save power
	 */
+	if ((rate <= ref_xtal_get_rate(&ref_xtal_clk)) &&
		((ref_xtal_get_rate(&ref_xtal_clk) % rate) == 0))
+		clk_set_parent(clk, &ref_xtal_clk);
+	else
+		clk_set_parent(clk, &ref_io_clk);
+
+	if (rate > PLL_ENABLED_MAX_CLK_GPMI)
+		rate = PLL_ENABLED_MAX_CLK_GPMI;
+
+	div = (clk_get_rate(clk->parent) + rate - 1) / rate;
+
+	if (div == 0 || div > BM_CLKCTRL_GPMI_DIV)
+		goto out;
+
+	reg = __raw_readl(clk->scale_reg);
+	reg &= ~(BM_CLKCTRL_GPMI_DIV | BM_CLKCTRL_GPMI_DIV_FRAC_EN);
+	reg |= div << clk->scale_bits;
+	__raw_writel(reg, clk->scale_reg);
+
+	ret = clk_busy_wait(clk);
+
+out:
+
+	/* Undo the enable above. */
+	local_clk_disable(clk);
+
+	if (ret != 0)
+		printk(KERN_ERR "%s: error %d\n", __func__, ret);
+	return ret;
+}
+
+static int gpmi_set_parent(struct clk *clk, struct clk *parent)
+{
+	int ret = -EINVAL;
+
+	if (clk->bypass_reg) {
+		if (clk->parent == parent)
+			return 0;
+		if (parent == &ref_io_clk)
+			__raw_writel(1 << clk->bypass_bits,
				clk->bypass_reg + CLR_REGISTER);
+		else
+			__raw_writel(1 << clk->bypass_bits,
				clk->bypass_reg + SET_REGISTER);
+		clk->parent = parent;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* handle peripheral clocks whose optimal parent depends on
+ * system parameters such as the cpu_clk rate.  For now, this optimization
+ * is only applied while the peripheral clock is not in use, to avoid
+ * handling more complex system clock coordination issues.
+ */
+static int gpmi_set_sys_dependent_parent(struct clk *clk)
+{
+
+	if ((clk->ref & CLK_EN_MASK) == 0) {
+		if (clk_get_rate(&cpu_clk) > ref_xtal_get_rate(&ref_xtal_clk)) {
+			clk_set_parent(clk, &ref_io_clk);
+			clk_set_rate(clk, PLL_ENABLED_MAX_CLK_GPMI);
+		} else {
+			clk_set_parent(clk, &ref_xtal_clk);
+			clk_set_rate(clk, ref_xtal_get_rate(&ref_xtal_clk));
+		}
+	}
+
+	return 0;
+}
+
+static struct clk gpmi_clk = {
+	.parent = &ref_io_clk,
+	.secondary = 0,
+	.flags = 0,
+	.set_parent = gpmi_set_parent,
+	.set_sys_dependent_parent = gpmi_set_sys_dependent_parent,
+
+	.enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI,
+	.enable_bits = BM_CLKCTRL_GPMI_CLKGATE,
+	.enable = mx23_raw_enable,
+	.disable = mx23_raw_disable,
+
+	.scale_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI,
+	.scale_bits = 0,
+	.round_rate = 0,
+	.set_rate = gpmi_set_rate,
+	.get_rate = gpmi_get_rate,
+
+	.bypass_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ,
+	.bypass_bits = 4,
+
+	.busy_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI,
+	.busy_bits = 29,
+};
+
 static unsigned long pcmspdif_get_rate(struct clk *clk)
 {
 	return clk->parent->get_rate(clk->parent) / 4;
@@ -935,20 +1394,33 @@ static struct clk audio_clk = {
 	.enable_bits = BM_CLKCTRL_XTAL_FILT_CLK24M_GATE,
 };
 
+static struct clk vid_clk = {
+	.parent = &ref_xtal_clk,
+	.enable = mx23_raw_enable,
+	.disable = mx23_raw_disable,
+	.enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC1,
+	.enable_bits = BM_CLKCTRL_FRAC1_CLKGATEVID,
+};
+
+static struct clk tv108M_ng_clk = {
+	.parent = &vid_clk,
+	.enable = mx23_raw_enable,
+	.disable = mx23_raw_disable,
+	.enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_TV,
+	.enable_bits = BM_CLKCTRL_TV_CLK_TV108M_GATE,
+	.flags = RATE_FIXED,
+};
+
+static struct clk tv27M_clk = {
+	.parent = &vid_clk,
+	.enable = mx23_raw_enable,
+	.disable = mx23_raw_disable,
+	.enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_TV,
+	.enable_bits = BM_CLKCTRL_TV_CLK_TV_GATE,
+	.flags = RATE_FIXED,
+};
 
 static struct clk_lookup onchip_clocks[] = {
-	{
-		.con_id = "xtal.0",
-		.clk = &xtal_clk[0],
-	},
-	{
-		.con_id = "xtal.1",
-		.clk = &xtal_clk[1],
-	},
-	{
-		.con_id = "xtal.2",
-		.clk = &xtal_clk[2],
-	},
 	{
 		.con_id = "pll.0",
 		.clk = &pll_clk,
@@ -977,9 +1449,13 @@ static struct clk_lookup onchip_clocks[] = {
 		.con_id = "lcdif",
 		.clk = &lcdif_clk,
 	},
+	{
+		.con_id = "xtal_clock32k",
+		.clk = &xtal_clock32k_clk,
+	},
 	{
 		.con_id = "rtc",
-		.clk = &rtc_clk,
+		.clk = &rtc32k_clk,
 	},
 	{
 		.con_id = "cpu",
@@ -1032,9 +1508,53 @@ static struct clk_lookup onchip_clocks[] = {
 	{
 		.con_id = "spdif",
 		.clk = &pcmspdif_clk,
-	}
+	},
+	{
+		.con_id = "ref_vid",
+		.clk = &vid_clk,
+	},
+	{
+		.con_id = "tv108M_ng",
+		.clk = &tv108M_ng_clk,
+	},
+	{
+		.con_id = "tv27M",
+		.clk = &tv27M_clk,
+	},
+	{
+		.con_id = "gpmi",
+		.clk = &gpmi_clk,
+	},
 };
 
+/* for debugging */
+#ifdef DEBUG
+static void print_ref_counts(void)
+{
+
+	printk(KERN_INFO "pll_clk ref count: %i\n",
		pll_clk.ref & CLK_EN_MASK);
+
+	printk(KERN_INFO "ref_cpu_clk ref count: %i\n",
		ref_cpu_clk.ref & CLK_EN_MASK);
+
+	printk(KERN_INFO "ref_emi_clk ref count: %i\n",
		ref_emi_clk.ref & CLK_EN_MASK);
+
+	printk(KERN_INFO "lcdif_clk ref count: %i\n",
		lcdif_clk.ref & CLK_EN_MASK);
+
+	printk(KERN_INFO "ref_io_clk ref count: %i\n",
		ref_io_clk.ref & CLK_EN_MASK);
+
+	printk(KERN_INFO "ssp_clk ref count: %i\n",
		ssp_clk.ref & CLK_EN_MASK);
+
+	printk(KERN_INFO "gpmi_clk ref count: %i\n",
		gpmi_clk.ref & CLK_EN_MASK);
+
+}
+#endif
 
 static void mx23_clock_scan(void)
 {
@@ -1046,16 +1566,19 @@
 		emi_clk.parent = &ref_xtal_clk;
 	if (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP)
 		ssp_clk.parent = &ref_xtal_clk;
-};
+	if (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI)
+		gpmi_clk.parent = &ref_xtal_clk;
+
+	reg = __raw_readl(RTC_BASE_ADDR + HW_RTC_PERSISTENT0);
+	if (!(reg & BM_RTC_PERSISTENT0_CLOCKSOURCE))
+		rtc32k_clk.parent = &ref_xtal_clk;
+};
 
 void __init mx23_set_input_clk(unsigned long xtal0, unsigned long xtal1,
 				unsigned long xtal2, unsigned long enet)
 {
-	xtal_clk_rate[0] = xtal0;
-	xtal_clk_rate[1] = xtal1;
-	xtal_clk_rate[2] = xtal2;
+
 }
 
 void __init mx23_clock_init(void)
@@ -1067,4 +1590,7 @@ void __init mx23_clock_init(void)
 
 	clk_enable(&cpu_clk);
 	clk_enable(&emi_clk);
+
+	clk_en_public_h_asm_ctrl(mx23_enable_h_autoslow,
+		mx23_set_hbus_autoslow_flags);
 }
-- 
cgit v1.2.3
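
Usage note (not part of the commit): the sketch below only illustrates how a driver might consume one of the clocks this patch registers, through the standard clk API of this kernel generation. The "gpmi" con_id comes from the clk_lookup table added above; the device pointer, function name, 96 MHz figure and error handling are illustrative assumptions, not code from the patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative sketch only -- not part of the patch. */
static int example_gpmi_clock_setup(struct device *dev)
{
	/* "gpmi" is the con_id registered in onchip_clocks[] by this commit */
	struct clk *clk = clk_get(dev, "gpmi");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* 96 MHz matches PLL_ENABLED_MAX_CLK_GPMI; gpmi_set_rate() clamps
	 * higher requests and reparents between ref_io and ref_xtal. */
	clk_set_rate(clk, 96000000);
	clk_enable(clk);

	/* ... use the GPMI controller ... */

	clk_disable(clk);
	clk_put(clk);
	return 0;
}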