Fork of the vendor (Boundary Devices) u-boot for Reform 2, with minor tweaks. The goal is to migrate to mainline u-boot or barebox ASAP. The main impediment so far is the 4GB RAM config.
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * An entire-invalidate operation on the data cache (L1 D$ or SL$), or a data
 * cache disable operation, may result in unexpected behavior and data loss
 * even if we flush the data cache right before the invalidation. That may
 * happen if we store any context on the stack (as we do when we store the
 * BLINK register on the stack before a function call).
 * BLINK is the register where the return address is automatically saved when
 * we make a function call with instructions like 'bl'.
 *
 * Here is a real example:
 * we may hang in the code below because we store the BLINK register on the
 * stack in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *         __dc_entire_op(OP_FLUSH);
 *         // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *         __dc_entire_op(OP_INV);
 *         // Other code //
 * }
 *
 * void foo(void) {
 *         flush_dcache_all();
 *         invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)                     ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)                     ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$]                           ![point 3]
 *         // Oops!!!
 *         // We lose the return address of the invalidate_dcache_all
 *         // function: we saved it on the stack and invalidated L1 D$
 *         // right after that!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // we don't have this data in the L1 dcache as we invalidated it in
 *     // [point 3], so we get it from the next memory level (for example
 *     // DDR memory), but in memory we have the value saved in [point 1],
 *     // which is the return address of the flush_dcache_all function
 *     // (instead of the address of the current invalidate_dcache_all
 *     // function which we saved in [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we can fix that by flushing & invalidating the D$ with a single
 * instruction (instead of a flush/invalidate instruction pair) and by forcing
 * function inlining with the '__attribute__((always_inline))' gcc attribute,
 * to avoid any function call (and BLINK store) between the cache flush and
 * the disable.
 *
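 * A minimal sketch of the safe pattern (illustrative only; the real code
 * below combines both steps in one OP_FLUSH_N_INV operation and forces
 * inlining via the 'inlined_cachefunc' macro):
 *
 *         static inlined_cachefunc void __dc_entire_op(const int cacheop);
 *
 *         void flush_n_invalidate_dcache_all(void) {
 *                 // single combined flush-n-inv: no function call (and no
 *                 // BLINK store) can land between the flush and the invalidate
 *                 __dc_entire_op(OP_FLUSH_N_INV);
 *         }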
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW (for example, since version 3.0 HS
 * supports SL$ (L2 system level cache) disable) but we don't support them
 * in SW.
 * Configuration 1:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *       ___|______________|_______
 *      |                          |
 *      |       main memory        |
 *      |__________________________|
 *
 * Configuration 2:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *       ___|______________|_______
 *      |                          |
 *      |        L2 (SL$)          |
 *      |__________________________|
 *           always must be on
 *       ___|______________|_______
 *      |                          |
 *      |       main memory        |
 *      |__________________________|
 *
 * Configuration 3:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off        must be on
 *       ___|______________|_______      _______
 *      |                          |    |       |
 *      |        L2 (SL$)          |----|  IOC  |
 *      |__________________________|    |_______|
 *           always must be on           on/off
 *       ___|______________|_______
 *      |                          |
 *      |       main memory        |
 *      |__________________________|
 */

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE           BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE           BIT(0)
#define DC_CTRL_INV_MODE_FLUSH          BIT(6)
#define DC_CTRL_FLUSH_STATUS            BIT(8)

#define OP_INV                          BIT(0)
#define OP_FLUSH                        BIT(1)
#define OP_FLUSH_N_INV                  (OP_FLUSH | OP_INV)

/* Bit val in SLC_CONTROL */
#define SLC_CTRL_DIS                    0x001
#define SLC_CTRL_IM                     0x040
#define SLC_CTRL_BUSY                   0x100
#define SLC_CTRL_RGN_OP_INV             0x200

#define CACHE_LINE_MASK                 (~(gd->arch.l1_line_sz - 1))
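
/*
 * Illustrative value (assumed, not read from real HW): with 64-byte L1 lines
 * CACHE_LINE_MASK is ~0x3f, i.e. it rounds an address down to the start of
 * its cache line.
 */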

/*
 * We don't want to use '__always_inline' macro here as it can be redefined
 * to simple 'inline' in some cases which breaks stuff. See [ NOTE 1 ] for
 * more details about the reasons we need to use always_inline functions.
 */
#define inlined_cachefunc       inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);

static inline bool pae_exists(void)
{
        /* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
        union bcr_mmu_4 mmu4;

        mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

        if (mmu4.fields.pae)
                return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

        return false;
}

static inlined_cachefunc bool icache_exists(void)
{
        union bcr_di_cache ibcr;

        ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
        return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
        if (!icache_exists())
                return false;

        return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
        union bcr_di_cache dbcr;

        dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
        return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
        if (!dcache_exists())
                return false;

        return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
        if (is_isa_arcv2()) {
                union bcr_generic sbcr;

                sbcr.word = read_aux_reg(ARC_BCR_SLC);
                return !!sbcr.fields.ver;
        }

        return false;
}

static inlined_cachefunc bool slc_data_bypass(void)
{
        /*
         * If the L1 data cache is disabled, SL$ is bypassed and all load/store
         * requests are sent directly to main memory.
         */
        return !dcache_enabled();
}

static inline bool ioc_exists(void)
{
        if (is_isa_arcv2()) {
                union bcr_clust_cfg cbcr;

                cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
                return cbcr.fields.c;
        }

        return false;
}

static inline bool ioc_enabled(void)
{
        /*
         * We only check the CONFIG option instead of the IOC HW state as the
         * IOC must be disabled by default.
         */
        if (is_ioc_enabled())
                return ioc_exists();

        return false;
}

static inlined_cachefunc void __slc_entire_op(const int op)
{
        unsigned int ctrl;

        if (!slc_exists())
                return;

        ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

        if (!(op & OP_FLUSH))           /* i.e. OP_INV */
                ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
        else
                ctrl |= SLC_CTRL_IM;

        write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

        if (op & OP_INV)        /* Inv or flush-n-inv use same cmd reg */
                write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
        else
                write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

        /* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
        read_aux_reg(ARC_AUX_SLC_CTRL);

        /* Important to wait for flush to complete */
        while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
        /*
         * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
         * only if PAE exists in the current HW, so we have to check
         * pae_exists() before using them.
         */
        if (!pae_exists())
                return;

        /*
         * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
         * as we don't use PAE40.
         */
        write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
        write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2

        unsigned int ctrl;
        unsigned long end;

        if (!slc_exists())
                return;

        /*
         * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
         *  - b'000 (default) is Flush,
         *  - b'001 is Invalidate if CTRL.IM == 0
         *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
         */
        ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

        /* Don't rely on the default value of the IM bit */
        if (!(op & OP_FLUSH))           /* i.e. OP_INV */
                ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
        else
                ctrl |= SLC_CTRL_IM;

        if (op & OP_INV)
                ctrl |= SLC_CTRL_RGN_OP_INV;    /* Inv or flush-n-inv */
        else
                ctrl &= ~SLC_CTRL_RGN_OP_INV;

        write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

        /*
         * Lower bits are ignored, no need to clip.
         * END needs to be set up before START (the latter triggers the
         * operation). END can't be the same as START, so add
         * (l2_line_sz - 1) to sz.
         */
        end = paddr + sz + gd->arch.slc_line_sz - 1;
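
        /*
         * Worked example with assumed values (not taken from real HW): with
         * a 128-byte SLC line, paddr = 0x80001000 and sz = 0x100 give
         * end = 0x8000117f, so the region op covers 0x80001000..0x8000117f.
         */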

        /*
         * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
         * are always == 0 as we don't use PAE40, so we only set up the lower
         * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
         */
        write_aux_reg(ARC_AUX_SLC_RGN_END, end);
        write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

        /* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
        read_aux_reg(ARC_AUX_SLC_CTRL);

        while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);

#endif /* CONFIG_ISA_ARCV2 */
}

static void arc_ioc_setup(void)
{
        /* IOC Aperture start is equal to DDR start */
        unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
        /* IOC Aperture size is equal to DDR size */
        long ap_size = CONFIG_SYS_SDRAM_SIZE;

        /* Unsupported configuration. See [ NOTE 2 ] for more details. */
        if (!slc_exists())
                panic("Try to enable IOC but SLC is not present");

        /* Unsupported configuration. See [ NOTE 2 ] for more details. */
        if (!dcache_enabled())
                panic("Try to enable IOC but L1 D$ is disabled");

        if (!is_power_of_2(ap_size) || ap_size < 4096)
                panic("IOC Aperture size must be power of 2 and bigger than 4KiB");

        /* IOC Aperture start must be aligned to the size of the aperture */
        if (ap_base % ap_size != 0)
                panic("IOC Aperture start must be aligned to the size of the aperture");

        flush_n_invalidate_dcache_all();

        /*
         * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
         * so setting 0x11 implies 512M, 0x12 implies 1G...
         */
        write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
                      order_base_2(ap_size / 1024) - 2);
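
        /*
         * Worked example for the SIZE value written above, with an assumed
         * 1 GiB of DDR: ap_size / 1024 is 1048576 (2 ^ 20) KiB, so
         * order_base_2() returns 20 and the written value is 18 (0x12),
         * matching "0x12 implies 1G".
         */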

        write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);

        write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
        write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2

        union bcr_slc_cfg slc_cfg;

        if (slc_exists()) {
                slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
                gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

                /*
                 * We don't support a configuration where L1 I$ or L1 D$ is
                 * absent but SL$ exists. See [ NOTE 2 ] for more details.
                 */
                if (!icache_exists() || !dcache_exists())
                        panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
        }

#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
        int dc_line_sz = 0, ic_line_sz = 0;
        union bcr_di_cache ibcr, dbcr;

        ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
        if (ibcr.fields.ver) {
                gd->arch.l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
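
                /*
                 * Illustrative decode (line_len value assumed): line_len == 3
                 * gives 8 << 3 = 64-byte I$ lines; the D$ decode below uses a
                 * 16-byte base, so 64-byte D$ lines correspond to line_len == 2.
                 */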
                if (!ic_line_sz)
                        panic("Instruction cache exists but line length is 0\n");
        }

        dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
        if (dbcr.fields.ver) {
                gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
                if (!dc_line_sz)
                        panic("Data cache exists but line length is 0\n");
        }

        if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
                panic("Instruction and data cache line lengths differ\n");
}

void cache_init(void)
{
        read_decode_cache_bcr();

        if (is_isa_arcv2())
                read_decode_cache_bcr_arcv2();

        if (is_isa_arcv2() && ioc_enabled())
                arc_ioc_setup();

        if (is_isa_arcv2() && slc_exists())
                slc_upper_region_init();
}

int icache_status(void)
{
        return icache_enabled();
}

void icache_enable(void)
{
        if (icache_exists())
                write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
                              ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
        if (!icache_exists())
                return;

        __ic_entire_invalidate();

        write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
                      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
        if (!icache_enabled())
                return;

        /* Any write to IC_IVIC register triggers invalidation of entire I$ */
        write_aux_reg(ARC_AUX_IC_IVIC, 1);

        /*
         * As per the ARC HS databook (see chapter 5.3.3.2)
         * it is required to add 3 NOPs after each write to IC_IVIC.
         */
        __builtin_arc_nop();
        __builtin_arc_nop();
        __builtin_arc_nop();
        read_aux_reg(ARC_AUX_IC_CTRL);  /* blocks */
}

void invalidate_icache_all(void)
{
        __ic_entire_invalidate();

        /*
         * If SL$ is bypassed for data it is used only for instructions,
         * so we need to invalidate it too.
         * TODO: HS 3.0 supports SLC disable, so we need to check the SLC
         * enable/disable status here.
         */
        if (is_isa_arcv2() && slc_data_bypass())
                __slc_entire_op(OP_INV);
}

int dcache_status(void)
{
        return dcache_enabled();
}

void dcache_enable(void)
{
        if (!dcache_exists())
                return;

        write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
                      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
        if (!dcache_exists())
                return;

        __dc_entire_op(OP_FLUSH_N_INV);

        /*
         * As SLC will be bypassed for data after the L1 D$ is disabled, we
         * need to flush it first, before disabling the L1 D$. We also
         * invalidate the SLC to avoid any inconsistent data problems after
         * enabling the L1 D$ again with the dcache_enable function.
         */
        if (is_isa_arcv2())
                __slc_entire_op(OP_FLUSH_N_INV);

        write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
                      DC_CTRL_CACHE_DISABLE);
}

/* Common helper for line operations on the D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
                                      const int cacheop)
{
        unsigned int aux_cmd;
        int num_lines;

        /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
        aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

        sz += paddr & ~CACHE_LINE_MASK;
        paddr &= CACHE_LINE_MASK;

        num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);
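
        /*
         * Illustrative numbers (assumed, not read from real HW): with 64-byte
         * lines, paddr = 0x1010 and sz = 0x70 become paddr = 0x1000 and
         * sz = 0x80, so num_lines = 2.
         */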
        while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
                write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
                write_aux_reg(aux_cmd, paddr);
                paddr += gd->arch.l1_line_sz;
        }
}

static inlined_cachefunc void __before_dc_op(const int op)
{
        unsigned int ctrl;

        ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

        /* IM bit implies flush-n-inv, instead of vanilla inv */
        if (op == OP_INV)
                ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
        else
                ctrl |= DC_CTRL_INV_MODE_FLUSH;

        write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
        if (op & OP_FLUSH)      /* flush / flush-n-inv both wait */
                while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
        int aux;

        if (!dcache_enabled())
                return;

        __before_dc_op(cacheop);

        if (cacheop & OP_INV)   /* Inv or flush-n-inv use same cmd reg */
                aux = ARC_AUX_DC_IVDC;
        else
                aux = ARC_AUX_DC_FLSH;

        write_aux_reg(aux, 0x1);

        __after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
                                const int cacheop)
{
        if (!dcache_enabled())
                return;

        __before_dc_op(cacheop);
        __dcache_line_loop(paddr, sz, cacheop);
        __after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;

        /*
         * ARCv1                                 -> call __dc_line_op
         * ARCv2 && L1 D$ disabled               -> nothing
         * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
         * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
         */
        if (!is_isa_arcv2() || !ioc_enabled())
                __dc_line_op(start, end - start, OP_INV);

        if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
                __slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;

        /*
         * ARCv1                                 -> call __dc_line_op
         * ARCv2 && L1 D$ disabled               -> nothing
         * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
         * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
         */
        if (!is_isa_arcv2() || !ioc_enabled())
                __dc_line_op(start, end - start, OP_FLUSH);

        if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
                __slc_rgn_op(start, end - start, OP_FLUSH);
}

void flush_cache(unsigned long start, unsigned long size)
{
        flush_dcache_range(start, start + size);
}
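
/*
 * A minimal usage sketch (not part of this file): 'buf' and 'len' are
 * hypothetical. A driver typically flushes a buffer it has just written
 * before starting memory-to-device DMA, and invalidates the range before
 * reading data that a device has DMA'd into memory:
 *
 *      flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *      ...                     // start DMA from memory to the device
 *      invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *      ...                     // now read the data the device wrote
 */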

/*
 * As invalidate_dcache_all() is not used in generic U-Boot code and we don't
 * need a pure invalidate (without flush) in arch/arc code either, we implement
 * flush_n_invalidate_dcache_all() (flush and invalidate in one operation)
 * because it's much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
        __dc_entire_op(OP_FLUSH_N_INV);

        if (is_isa_arcv2() && !slc_data_bypass())
                __slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
        __dc_entire_op(OP_FLUSH);

        if (is_isa_arcv2() && !slc_data_bypass())
                __slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and therefore syncs the I/D caches). It
 * can be used for cleanup before launching Linux or to sync caches during
 * relocation.
 */
void sync_n_cleanup_cache_all(void)
{
        __dc_entire_op(OP_FLUSH_N_INV);

        /*
         * If SL$ is bypassed for data it is used only for instructions,
         * and we shouldn't flush it. So invalidate it instead of flush-n-inv.
         */
        if (is_isa_arcv2()) {
                if (slc_data_bypass())
                        __slc_entire_op(OP_INV);
                else
                        __slc_entire_op(OP_FLUSH_N_INV);
        }

        __ic_entire_invalidate();
}