/* dlaqr5.f -- translated by f2c (version 20061008).
   You must link the resulting object file with libf2c:
	on Microsoft Windows system, link with libf2c.lib;
	on Linux or Unix systems, link with .../path/to/libf2c.a -lm
	or, if you install libf2c.a in a standard place, with -lf2c -lm
	-- in that order, at the end of the command line, as in
		cc *.o -lf2c -lm
	Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
		http://www.netlib.org/f2c/libf2c.zip
*/
#include "f2c.h"
#include "blaswrap.h"

/* Table of constant values */

static doublereal c_b7 = 0.;
static doublereal c_b8 = 1.;
static integer c__3 = 3;
static integer c__1 = 1;
static integer c__2 = 2;
/* Subroutine */ int _starpu_dlaqr5_(logical *wantt, logical *wantz, integer *kacc22,
	integer *n, integer *ktop, integer *kbot, integer *nshfts, doublereal
	*sr, doublereal *si, doublereal *h__, integer *ldh, integer *iloz,
	integer *ihiz, doublereal *z__, integer *ldz, doublereal *v, integer *
	ldv, doublereal *u, integer *ldu, integer *nv, doublereal *wv,
	integer *ldwv, integer *nh, doublereal *wh, integer *ldwh)
{
    /* System generated locals */
    integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1,
	    wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3,
	    i__4, i__5, i__6, i__7;
    doublereal d__1, d__2, d__3, d__4, d__5;

    /* Local variables */
    integer i__, j, k, m, i2, j2, i4, j4, k1;
    doublereal h11, h12, h21, h22;
    integer m22, ns, nu;
    doublereal vt[3], scl;
    integer kdu, kms;
    doublereal ulp;
    integer knz, kzs;
    doublereal tst1, tst2, beta;
    logical blk22, bmp22;
    integer mend, jcol, jlen, jbot, mbot;
    doublereal swap;
    integer jtop, jrow, mtop;
    doublereal alpha;
    logical accum;
    extern /* Subroutine */ int _starpu_dgemm_(char *, char *, integer *, integer *,
	    integer *, doublereal *, doublereal *, integer *, doublereal *,
	    integer *, doublereal *, doublereal *, integer *);
    integer ndcol, incol, krcol, nbmps;
    extern /* Subroutine */ int _starpu_dtrmm_(char *, char *, char *, char *,
	    integer *, integer *, doublereal *, doublereal *, integer *,
	    doublereal *, integer *), _starpu_dlaqr1_(integer *, doublereal *,
	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
	    doublereal *), _starpu_dlabad_(doublereal *, doublereal *);
    extern doublereal _starpu_dlamch_(char *);
    extern /* Subroutine */ int _starpu_dlarfg_(integer *, doublereal *, doublereal *,
	    integer *, doublereal *), _starpu_dlacpy_(char *, integer *, integer *,
	    doublereal *, integer *, doublereal *, integer *);
    doublereal safmin;
    extern /* Subroutine */ int _starpu_dlaset_(char *, integer *, integer *,
	    doublereal *, doublereal *, doublereal *, integer *);
    doublereal safmax, refsum;
    integer mstart;
    doublereal smlnum;
/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*     This auxiliary subroutine called by DLAQR0 performs a */
/*     single small-bulge multi-shift QR sweep. */

/*      WANTT  (input) logical scalar */
/*             WANTT = .true. if the quasi-triangular Schur factor */
/*             is being computed.  WANTT is set to .false. otherwise. */

/*      WANTZ  (input) logical scalar */
/*             WANTZ = .true. if the orthogonal Schur factor is being */
/*             computed.  WANTZ is set to .false. otherwise. */

/*      KACC22 (input) integer with value 0, 1, or 2. */
/*             Specifies the computation mode of far-from-diagonal */
/*             orthogonal updates. */
/*        = 0: DLAQR5 does not accumulate reflections and does not */
/*             use matrix-matrix multiply to update far-from-diagonal */
/*             matrix entries. */
/*        = 1: DLAQR5 accumulates reflections and uses matrix-matrix */
/*             multiply to update the far-from-diagonal matrix entries. */
/*        = 2: DLAQR5 accumulates reflections, uses matrix-matrix */
/*             multiply to update the far-from-diagonal matrix entries, */
/*             and takes advantage of 2-by-2 block structure during */
/*             matrix multiplies. */

/*      N      (input) integer scalar */
/*             N is the order of the Hessenberg matrix H upon which this */
/*             subroutine operates. */

/*      KTOP   (input) integer scalar */
/*      KBOT   (input) integer scalar */
/*             These are the first and last rows and columns of an */
/*             isolated diagonal block upon which the QR sweep is to be */
/*             applied.  It is assumed without a check that */
/*                       either KTOP = 1  or   H(KTOP,KTOP-1) = 0 */
/*             and */
/*                       either KBOT = N  or   H(KBOT+1,KBOT) = 0. */

/*      NSHFTS (input) integer scalar */
/*             NSHFTS gives the number of simultaneous shifts.  NSHFTS */
/*             must be positive and even. */

/*      SR     (input/output) DOUBLE PRECISION array of size (NSHFTS) */
/*      SI     (input/output) DOUBLE PRECISION array of size (NSHFTS) */
/*             SR contains the real parts and SI contains the imaginary */
/*             parts of the NSHFTS shifts of origin that define the */
/*             multi-shift QR sweep.  On output SR and SI may be */
/*             reordered. */

/*      H      (input/output) DOUBLE PRECISION array of size (LDH,N) */
/*             On input H contains a Hessenberg matrix.  On output a */
/*             multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied */
/*             to the isolated diagonal block in rows and columns KTOP */
/*             through KBOT. */

/*      LDH    (input) integer scalar */
/*             LDH is the leading dimension of H just as declared in the */
/*             calling procedure.  LDH.GE.MAX(1,N). */

/*      ILOZ   (input) INTEGER */
/*      IHIZ   (input) INTEGER */
/*             Specify the rows of Z to which transformations must be */
/*             applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N */

/*      Z      (input/output) DOUBLE PRECISION array of size (LDZ,IHI) */
/*             If WANTZ = .TRUE., then the QR Sweep orthogonal */
/*             similarity transformation is accumulated into */
/*             Z(ILOZ:IHIZ,ILO:IHI) from the right. */
/*             If WANTZ = .FALSE., then Z is unreferenced. */

/*      LDZ    (input) integer scalar */
/*             LDZ is the leading dimension of Z just as declared in */
/*             the calling procedure.  LDZ.GE.N. */

/*      V      (workspace) DOUBLE PRECISION array of size (LDV,NSHFTS/2) */

/*      LDV    (input) integer scalar */
/*             LDV is the leading dimension of V as declared in the */
/*             calling procedure.  LDV.GE.3. */

/*      U      (workspace) DOUBLE PRECISION array of size */
/*             (LDU,3*NSHFTS-3) */

/*      LDU    (input) integer scalar */
/*             LDU is the leading dimension of U just as declared in */
/*             the calling subroutine.  LDU.GE.3*NSHFTS-3. */

/*      NH     (input) integer scalar */
/*             NH is the number of columns in array WH available for */
/*             workspace.  NH.GE.1. */

/*      WH     (workspace) DOUBLE PRECISION array of size (LDWH,NH) */

/*      LDWH   (input) integer scalar */
/*             Leading dimension of WH just as declared in the */
/*             calling procedure.  LDWH.GE.3*NSHFTS-3. */

/*      NV     (input) integer scalar */
/*             NV is the number of rows in WV available for */
/*             workspace.  NV.GE.1. */

/*      WV     (workspace) DOUBLE PRECISION array of size */
/*             (LDWV,3*NSHFTS-3) */

/*      LDWV   (input) integer scalar */
/*             LDWV is the leading dimension of WV as declared in */
/*             the calling subroutine.  LDWV.GE.NV. */

/*     ================================================================ */
/*     Based on contributions by */
/*        Karen Braman and Ralph Byers, Department of Mathematics, */
/*        University of Kansas, USA */

/*     ================================================================ */
/*     Reference: */

/*     K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
/*     Algorithm Part I: Maintaining Well Focused Shifts, and */
/*     Level 3 Performance, SIAM Journal of Matrix Analysis, */
/*     volume 23, pages 929--947, 2002. */

/*     ================================================================ */
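/*     ==== Usage sketch (added commentary, not part of the original */
/*     .    LAPACK header): a minimal, hedged illustration of the */
/*     .    workspace shapes described above for NSHFTS = 4, using */
/*     .    hypothetical caller-side variables; in LAPACK the actual */
/*     .    caller is DLAQR0. ==== */

/*        integer nshfts = 4, nh = 32, nv = 32;            */
/*        integer ldv = 3, ldu = 9, ldwh = 9, ldwv = 32;   */
/*        doublereal v[3 * 2];     V  is (LDV,  NSHFTS/2)  */
/*        doublereal u[9 * 9];     U  is (LDU,  3*NSHFTS-3) */
/*        doublereal wh[9 * 32];   WH is (LDWH, NH)        */
/*        doublereal wv[32 * 9];   WV is (LDWV, 3*NSHFTS-3) */
/*        _starpu_dlaqr5_(&wantt, &wantz, &kacc22, &n, &ktop, &kbot, */
/*                &nshfts, sr, si, h, &ldh, &iloz, &ihiz, z, &ldz,   */
/*                v, &ldv, u, &ldu, &nv, wv, &ldwv, &nh, wh, &ldwh); */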
/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Executable Statements .. */
/*     ==== If there are no shifts, then there is nothing to do. ==== */

    /* Parameter adjustments */
    --sr;
    --si;
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1;
    u -= u_offset;
    wv_dim1 = *ldwv;
    wv_offset = 1 + wv_dim1;
    wv -= wv_offset;
    wh_dim1 = *ldwh;
    wh_offset = 1 + wh_dim1;
    wh -= wh_offset;

    /* Function Body */
    if (*nshfts < 2) {
	return 0;
    }

/*     ==== If the active block is empty or 1-by-1, then there */
/*     .    is nothing to do. ==== */

    if (*ktop >= *kbot) {
	return 0;
    }
/*     ==== Shuffle shifts into pairs of real shifts and pairs */
/*     .    of complex conjugate shifts assuming complex */
/*     .    conjugate shifts are already adjacent to one */
/*     .    another. ==== */

    i__1 = *nshfts - 2;
    for (i__ = 1; i__ <= i__1; i__ += 2) {
	if (si[i__] != -si[i__ + 1]) {
	    swap = sr[i__];
	    sr[i__] = sr[i__ + 1];
	    sr[i__ + 1] = sr[i__ + 2];
	    sr[i__ + 2] = swap;
	    swap = si[i__];
	    si[i__] = si[i__ + 1];
	    si[i__ + 1] = si[i__ + 2];
	    si[i__ + 2] = swap;
	}
/* L10: */
    }
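/*     ==== Added commentary: for example, with NSHFTS = 4 and */
/*     .    SI = (0, a, -a, 0) on entry, the pass above rotates */
/*     .    entries 1..3 so that SI becomes (a, -a, 0, 0): the */
/*     .    complex conjugate pair is adjacent and the two real */
/*     .    shifts form the second pair. ==== */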
/*     ==== NSHFTS is supposed to be even, but if it is odd, */
/*     .    then simply reduce it by one.  The shuffle above */
/*     .    ensures that the dropped shift is real and that */
/*     .    the remaining shifts are paired. ==== */

    ns = *nshfts - *nshfts % 2;

/*     ==== Machine constants for deflation ==== */

    safmin = _starpu_dlamch_("SAFE MINIMUM");
    safmax = 1. / safmin;
    _starpu_dlabad_(&safmin, &safmax);
    ulp = _starpu_dlamch_("PRECISION");
    smlnum = safmin * ((doublereal) (*n) / ulp);

/*     ==== Use accumulated reflections to update far-from-diagonal */
/*     .    entries ? ==== */

    accum = *kacc22 == 1 || *kacc22 == 2;

/*     ==== If so, exploit the 2-by-2 block structure? ==== */

    blk22 = ns > 2 && *kacc22 == 2;

/*     ==== clear trash ==== */

    if (*ktop + 2 <= *kbot) {
	h__[*ktop + 2 + *ktop * h_dim1] = 0.;
    }

/*     ==== NBMPS = number of 2-shift bulges in the chain ==== */

    nbmps = ns / 2;

/*     ==== KDU = width of slab ==== */

    kdu = nbmps * 6 - 3;

/*     ==== Create and chase chains of NBMPS bulges ==== */

    i__1 = *kbot - 2;
    i__2 = nbmps * 3 - 2;
    for (incol = (1 - nbmps) * 3 + *ktop - 1; i__2 < 0 ? incol >= i__1 :
	    incol <= i__1; incol += i__2) {
	ndcol = incol + kdu;
	if (accum) {
	    _starpu_dlaset_("ALL", &kdu, &kdu, &c_b7, &c_b8, &u[u_offset], ldu);
	}

/*        ==== Near-the-diagonal bulge chase.  The following loop */
/*        .    performs the near-the-diagonal part of a small bulge */
/*        .    multi-shift QR sweep.  Each 6*NBMPS-2 column diagonal */
/*        .    chunk extends from column INCOL to column NDCOL */
/*        .    (including both column INCOL and column NDCOL).  The */
/*        .    following loop chases a 3*NBMPS column long chain of */
/*        .    NBMPS bulges 3*NBMPS-2 columns to the right.  (INCOL */
/*        .    may be less than KTOP and NDCOL may be greater than */
/*        .    KBOT indicating phantom columns from which to chase */
/*        .    bulges before they are actually introduced or to which */
/*        .    to chase bulges beyond column KBOT.)  ==== */
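/*        ==== Added commentary: a concrete instance of the sizes */
/*        .    above: for NS = 4 shifts, NBMPS = 2 and KDU = 9, so */
/*        .    each slab spans the 10 columns INCOL through */
/*        .    NDCOL = INCOL+9, and each pass of the loop below */
/*        .    advances the chain of 2 bulges by 3*NBMPS-2 = 4 */
/*        .    columns. ==== */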
/* Computing MIN */
	i__4 = incol + nbmps * 3 - 3, i__5 = *kbot - 2;
	i__3 = min(i__4,i__5);
	for (krcol = incol; krcol <= i__3; ++krcol) {

/*           ==== Bulges number MTOP to MBOT are active double implicit */
/*           .    shift bulges.  There may or may not also be small */
/*           .    2-by-2 bulge, if there is room.  The inactive bulges */
/*           .    (if any) must wait until the active bulges have moved */
/*           .    down the diagonal to make room.  The phantom matrix */
/*           .    paradigm described above helps keep track. ==== */

/* Computing MAX */
	    i__4 = 1, i__5 = (*ktop - 1 - krcol + 2) / 3 + 1;
	    mtop = max(i__4,i__5);
/* Computing MIN */
	    i__4 = nbmps, i__5 = (*kbot - krcol) / 3;
	    mbot = min(i__4,i__5);
	    m22 = mbot + 1;
	    bmp22 = mbot < nbmps && krcol + (m22 - 1) * 3 == *kbot - 2;

/*           ==== Generate reflections to chase the chain right */
/*           .    one column.  (The minimum value of K is KTOP-1.) ==== */

	    i__4 = mbot;
	    for (m = mtop; m <= i__4; ++m) {
		k = krcol + (m - 1) * 3;
		if (k == *ktop - 1) {
		    _starpu_dlaqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh,
			    &sr[(m << 1) - 1], &si[(m << 1) - 1], &sr[m * 2],
			    &si[m * 2], &v[m * v_dim1 + 1]);
		    alpha = v[m * v_dim1 + 1];
		    _starpu_dlarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1,
			    &v[m * v_dim1 + 1]);
		} else {
		    beta = h__[k + 1 + k * h_dim1];
		    v[m * v_dim1 + 2] = h__[k + 2 + k * h_dim1];
		    v[m * v_dim1 + 3] = h__[k + 3 + k * h_dim1];
		    _starpu_dlarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1,
			    &v[m * v_dim1 + 1]);

/*                 ==== A Bulge may collapse because of vigilant */
/*                 .    deflation or destructive underflow.  In the */
/*                 .    underflow case, try the two-small-subdiagonals */
/*                 .    trick to try to reinflate the bulge.  ==== */

		    if (h__[k + 3 + k * h_dim1] != 0. || h__[k + 3 + (k + 1) *
			    h_dim1] != 0. || h__[k + 3 + (k + 2) * h_dim1] == 0.) {

/*                    ==== Typical case: not collapsed (yet). ==== */

			h__[k + 1 + k * h_dim1] = beta;
			h__[k + 2 + k * h_dim1] = 0.;
			h__[k + 3 + k * h_dim1] = 0.;
		    } else {

/*                    ==== Atypical case: collapsed.  Attempt to */
/*                    .    reintroduce ignoring H(K+1,K) and H(K+2,K). */
/*                    .    If the fill resulting from the new */
/*                    .    reflector is too large, then abandon it. */
/*                    .    Otherwise, use the new one. ==== */

			_starpu_dlaqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh,
				&sr[(m << 1) - 1], &si[(m << 1) - 1],
				&sr[m * 2], &si[m * 2], vt);
			alpha = vt[0];
			_starpu_dlarfg_(&c__3, &alpha, &vt[1], &c__1, vt);
			refsum = vt[0] * (h__[k + 1 + k * h_dim1] + vt[1] *
				h__[k + 2 + k * h_dim1]);
			if ((d__1 = h__[k + 2 + k * h_dim1] - refsum * vt[1],
				abs(d__1)) + (d__2 = refsum * vt[2], abs(d__2))
				> ulp * ((d__3 = h__[k + k * h_dim1], abs(d__3))
				+ (d__4 = h__[k + 1 + (k + 1) * h_dim1], abs(d__4))
				+ (d__5 = h__[k + 2 + (k + 2) * h_dim1], abs(d__5)))) {

/*                       ==== Starting a new bulge here would */
/*                       .    create non-negligible fill.  Use */
/*                       .    the old one with trepidation. ==== */

			    h__[k + 1 + k * h_dim1] = beta;
			    h__[k + 2 + k * h_dim1] = 0.;
			    h__[k + 3 + k * h_dim1] = 0.;
			} else {

/*                       ==== Starting a new bulge here would */
/*                       .    create only negligible fill. */
/*                       .    Replace the old reflector with */
/*                       .    the new one. ==== */

			    h__[k + 1 + k * h_dim1] -= refsum;
			    h__[k + 2 + k * h_dim1] = 0.;
			    h__[k + 3 + k * h_dim1] = 0.;
			    v[m * v_dim1 + 1] = vt[0];
			    v[m * v_dim1 + 2] = vt[1];
			    v[m * v_dim1 + 3] = vt[2];
			}
		    }
		}
/* L20: */
	    }
/*           ==== Generate a 2-by-2 reflection, if needed. ==== */

	    k = krcol + (m22 - 1) * 3;
	    if (bmp22) {
		if (k == *ktop - 1) {
		    _starpu_dlaqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh,
			    &sr[(m22 << 1) - 1], &si[(m22 << 1) - 1],
			    &sr[m22 * 2], &si[m22 * 2], &v[m22 * v_dim1 + 1]);
		    beta = v[m22 * v_dim1 + 1];
		    _starpu_dlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1,
			    &v[m22 * v_dim1 + 1]);
		} else {
		    beta = h__[k + 1 + k * h_dim1];
		    v[m22 * v_dim1 + 2] = h__[k + 2 + k * h_dim1];
		    _starpu_dlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1,
			    &v[m22 * v_dim1 + 1]);
		    h__[k + 1 + k * h_dim1] = beta;
		    h__[k + 2 + k * h_dim1] = 0.;
		}
	    }

/*           ==== Multiply H by reflections from the left ==== */

	    if (accum) {
		jbot = min(ndcol,*kbot);
	    } else if (*wantt) {
		jbot = *n;
	    } else {
		jbot = *kbot;
	    }
	    i__4 = jbot;
	    for (j = max(*ktop,krcol); j <= i__4; ++j) {
/* Computing MIN */
		i__5 = mbot, i__6 = (j - krcol + 2) / 3;
		mend = min(i__5,i__6);
		i__5 = mend;
		for (m = mtop; m <= i__5; ++m) {
		    k = krcol + (m - 1) * 3;
		    refsum = v[m * v_dim1 + 1] * (h__[k + 1 + j * h_dim1]
			    + v[m * v_dim1 + 2] * h__[k + 2 + j * h_dim1]
			    + v[m * v_dim1 + 3] * h__[k + 3 + j * h_dim1]);
		    h__[k + 1 + j * h_dim1] -= refsum;
		    h__[k + 2 + j * h_dim1] -= refsum * v[m * v_dim1 + 2];
		    h__[k + 3 + j * h_dim1] -= refsum * v[m * v_dim1 + 3];
/* L30: */
		}
/* L40: */
	    }
	    if (bmp22) {
		k = krcol + (m22 - 1) * 3;
/* Computing MAX */
		i__4 = k + 1;
		i__5 = jbot;
		for (j = max(i__4,*ktop); j <= i__5; ++j) {
		    refsum = v[m22 * v_dim1 + 1] * (h__[k + 1 + j * h_dim1]
			    + v[m22 * v_dim1 + 2] * h__[k + 2 + j * h_dim1]);
		    h__[k + 1 + j * h_dim1] -= refsum;
		    h__[k + 2 + j * h_dim1] -= refsum * v[m22 * v_dim1 + 2];
/* L50: */
		}
	    }

/*           ==== Multiply H by reflections from the right. */
/*           .    Delay filling in the last row until the */
/*           .    vigilant deflation check is complete. ==== */

	    if (accum) {
		jtop = max(*ktop,incol);
	    } else if (*wantt) {
		jtop = 1;
	    } else {
		jtop = *ktop;
	    }
	    i__5 = mbot;
	    for (m = mtop; m <= i__5; ++m) {
		if (v[m * v_dim1 + 1] != 0.) {
		    k = krcol + (m - 1) * 3;
/* Computing MIN */
		    i__6 = *kbot, i__7 = k + 3;
		    i__4 = min(i__6,i__7);
		    for (j = jtop; j <= i__4; ++j) {
			refsum = v[m * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1]
				+ v[m * v_dim1 + 2] * h__[j + (k + 2) * h_dim1]
				+ v[m * v_dim1 + 3] * h__[j + (k + 3) * h_dim1]);
			h__[j + (k + 1) * h_dim1] -= refsum;
			h__[j + (k + 2) * h_dim1] -= refsum * v[m * v_dim1 + 2];
			h__[j + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3];
/* L60: */
		    }
		    if (accum) {

/*                    ==== Accumulate U. (If necessary, update Z later */
/*                    .    with an efficient matrix-matrix */
/*                    .    multiply.) ==== */

			kms = k - incol;
/* Computing MAX */
			i__4 = 1, i__6 = *ktop - incol;
			i__7 = kdu;
			for (j = max(i__4,i__6); j <= i__7; ++j) {
			    refsum = v[m * v_dim1 + 1] * (u[j + (kms + 1) * u_dim1]
				    + v[m * v_dim1 + 2] * u[j + (kms + 2) * u_dim1]
				    + v[m * v_dim1 + 3] * u[j + (kms + 3) * u_dim1]);
			    u[j + (kms + 1) * u_dim1] -= refsum;
			    u[j + (kms + 2) * u_dim1] -= refsum * v[m * v_dim1 + 2];
			    u[j + (kms + 3) * u_dim1] -= refsum * v[m * v_dim1 + 3];
/* L70: */
			}
		    } else if (*wantz) {

/*                    ==== U is not accumulated, so update Z */
/*                    .    now by multiplying by reflections */
/*                    .    from the right. ==== */

			i__7 = *ihiz;
			for (j = *iloz; j <= i__7; ++j) {
			    refsum = v[m * v_dim1 + 1] * (z__[j + (k + 1) * z_dim1]
				    + v[m * v_dim1 + 2] * z__[j + (k + 2) * z_dim1]
				    + v[m * v_dim1 + 3] * z__[j + (k + 3) * z_dim1]);
			    z__[j + (k + 1) * z_dim1] -= refsum;
			    z__[j + (k + 2) * z_dim1] -= refsum * v[m * v_dim1 + 2];
			    z__[j + (k + 3) * z_dim1] -= refsum * v[m * v_dim1 + 3];
/* L80: */
			}
		    }
		}
/* L90: */
	    }
/*           ==== Special case: 2-by-2 reflection (if needed) ==== */

	    k = krcol + (m22 - 1) * 3;
	    if (bmp22 && v[m22 * v_dim1 + 1] != 0.) {
/* Computing MIN */
		i__7 = *kbot, i__4 = k + 3;
		i__5 = min(i__7,i__4);
		for (j = jtop; j <= i__5; ++j) {
		    refsum = v[m22 * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1]
			    + v[m22 * v_dim1 + 2] * h__[j + (k + 2) * h_dim1]);
		    h__[j + (k + 1) * h_dim1] -= refsum;
		    h__[j + (k + 2) * h_dim1] -= refsum * v[m22 * v_dim1 + 2];
/* L100: */
		}
		if (accum) {
		    kms = k - incol;
/* Computing MAX */
		    i__5 = 1, i__7 = *ktop - incol;
		    i__4 = kdu;
		    for (j = max(i__5,i__7); j <= i__4; ++j) {
			refsum = v[m22 * v_dim1 + 1] * (u[j + (kms + 1) * u_dim1]
				+ v[m22 * v_dim1 + 2] * u[j + (kms + 2) * u_dim1]);
			u[j + (kms + 1) * u_dim1] -= refsum;
			u[j + (kms + 2) * u_dim1] -= refsum * v[m22 * v_dim1 + 2];
/* L110: */
		    }
		} else if (*wantz) {
		    i__4 = *ihiz;
		    for (j = *iloz; j <= i__4; ++j) {
			refsum = v[m22 * v_dim1 + 1] * (z__[j + (k + 1) * z_dim1]
				+ v[m22 * v_dim1 + 2] * z__[j + (k + 2) * z_dim1]);
			z__[j + (k + 1) * z_dim1] -= refsum;
			z__[j + (k + 2) * z_dim1] -= refsum * v[m22 * v_dim1 + 2];
/* L120: */
		    }
		}
	    }
/*           ==== Vigilant deflation check ==== */

	    mstart = mtop;
	    if (krcol + (mstart - 1) * 3 < *ktop) {
		++mstart;
	    }
	    mend = mbot;
	    if (bmp22) {
		++mend;
	    }
	    if (krcol == *kbot - 2) {
		++mend;
	    }
	    i__4 = mend;
	    for (m = mstart; m <= i__4; ++m) {
/* Computing MIN */
		i__5 = *kbot - 1, i__7 = krcol + (m - 1) * 3;
		k = min(i__5,i__7);

/*              ==== The following convergence test requires that */
/*              .    the traditional small-compared-to-nearby-diagonals */
/*              .    criterion and the Ahues & Tisseur (LAWN 122, 1997) */
/*              .    criteria both be satisfied.  The latter improves */
/*              .    accuracy in some examples.  Falling back on an */
/*              .    alternate convergence criterion when TST1 or TST2 */
/*              .    is zero (as done here) is traditional but probably */
/*              .    unnecessary. ==== */
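/*              ==== Added commentary: in formulas, H(K+1,K) is set */
/*              .    to zero below when |H(K+1,K)| <= max(SMLNUM, */
/*              .    ULP*TST1) and, in the scaled form computed */
/*              .    below, H21*(H12/SCL) <= max(SMLNUM, ULP*TST2), */
/*              .    i.e. roughly |H(K+1,K)|*|H(K,K+1)| is negligible */
/*              .    relative to |H(K+1,K+1)|*|H(K,K)-H(K+1,K+1)|. ==== */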
		if (h__[k + 1 + k * h_dim1] != 0.) {
		    tst1 = (d__1 = h__[k + k * h_dim1], abs(d__1)) + (d__2 =
			    h__[k + 1 + (k + 1) * h_dim1], abs(d__2));
		    if (tst1 == 0.) {
			if (k >= *ktop + 1) {
			    tst1 += (d__1 = h__[k + (k - 1) * h_dim1], abs(d__1));
			}
			if (k >= *ktop + 2) {
			    tst1 += (d__1 = h__[k + (k - 2) * h_dim1], abs(d__1));
			}
			if (k >= *ktop + 3) {
			    tst1 += (d__1 = h__[k + (k - 3) * h_dim1], abs(d__1));
			}
			if (k <= *kbot - 2) {
			    tst1 += (d__1 = h__[k + 2 + (k + 1) * h_dim1], abs(d__1));
			}
			if (k <= *kbot - 3) {
			    tst1 += (d__1 = h__[k + 3 + (k + 1) * h_dim1], abs(d__1));
			}
			if (k <= *kbot - 4) {
			    tst1 += (d__1 = h__[k + 4 + (k + 1) * h_dim1], abs(d__1));
			}
		    }
/* Computing MAX */
		    d__2 = smlnum, d__3 = ulp * tst1;
		    if ((d__1 = h__[k + 1 + k * h_dim1], abs(d__1)) <=
			    max(d__2,d__3)) {
/* Computing MAX */
			d__3 = (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)),
				d__4 = (d__2 = h__[k + (k + 1) * h_dim1], abs(d__2));
			h12 = max(d__3,d__4);
/* Computing MIN */
			d__3 = (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)),
				d__4 = (d__2 = h__[k + (k + 1) * h_dim1], abs(d__2));
			h21 = min(d__3,d__4);
/* Computing MAX */
			d__3 = (d__1 = h__[k + 1 + (k + 1) * h_dim1], abs(d__1)),
				d__4 = (d__2 = h__[k + k * h_dim1] -
				h__[k + 1 + (k + 1) * h_dim1], abs(d__2));
			h11 = max(d__3,d__4);
/* Computing MIN */
			d__3 = (d__1 = h__[k + 1 + (k + 1) * h_dim1], abs(d__1)),
				d__4 = (d__2 = h__[k + k * h_dim1] -
				h__[k + 1 + (k + 1) * h_dim1], abs(d__2));
			h22 = min(d__3,d__4);
			scl = h11 + h12;
			tst2 = h22 * (h11 / scl);
/* Computing MAX */
			d__1 = smlnum, d__2 = ulp * tst2;
			if (tst2 == 0. || h21 * (h12 / scl) <= max(d__1,d__2)) {
			    h__[k + 1 + k * h_dim1] = 0.;
			}
		    }
		}
/* L130: */
	    }
/*           ==== Fill in the last row of each bulge. ==== */

/* Computing MIN */
	    i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 3;
	    mend = min(i__4,i__5);
	    i__4 = mend;
	    for (m = mtop; m <= i__4; ++m) {
		k = krcol + (m - 1) * 3;
		refsum = v[m * v_dim1 + 1] * v[m * v_dim1 + 3] *
			h__[k + 4 + (k + 3) * h_dim1];
		h__[k + 4 + (k + 1) * h_dim1] = -refsum;
		h__[k + 4 + (k + 2) * h_dim1] = -refsum * v[m * v_dim1 + 2];
		h__[k + 4 + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3];
/* L140: */
	    }

/*           ==== End of near-the-diagonal bulge chase. ==== */

/* L150: */
	}
/*        ==== Use U (if accumulated) to update far-from-diagonal */
/*        .    entries in H.  If required, use U to update Z as */
/*        .    well. ==== */

	if (accum) {
	    if (*wantt) {
		jtop = 1;
		jbot = *n;
	    } else {
		jtop = *ktop;
		jbot = *kbot;
	    }
	    if (! blk22 || incol < *ktop || ndcol > *kbot || ns <= 2) {

/*              ==== Updates not exploiting the 2-by-2 block */
/*              .    structure of U.  K1 and NU keep track of */
/*              .    the location and size of U in the special */
/*              .    cases of introducing bulges and chasing */
/*              .    bulges off the bottom.  In these special */
/*              .    cases and in case the number of shifts */
/*              .    is NS = 2, there is no 2-by-2 block */
/*              .    structure to exploit. ==== */

/* Computing MAX */
		i__3 = 1, i__4 = *ktop - incol;
		k1 = max(i__3,i__4);
/* Computing MAX */
		i__3 = 0, i__4 = ndcol - *kbot;
		nu = kdu - max(i__3,i__4) - k1 + 1;

/*              ==== Horizontal Multiply ==== */

		i__3 = jbot;
		i__4 = *nh;
		for (jcol = min(ndcol,*kbot) + 1; i__4 < 0 ? jcol >= i__3 :
			jcol <= i__3; jcol += i__4) {
/* Computing MIN */
		    i__5 = *nh, i__7 = jbot - jcol + 1;
		    jlen = min(i__5,i__7);
		    _starpu_dgemm_("C", "N", &nu, &jlen, &nu, &c_b8,
			    &u[k1 + k1 * u_dim1], ldu,
			    &h__[incol + k1 + jcol * h_dim1], ldh,
			    &c_b7, &wh[wh_offset], ldwh);
		    _starpu_dlacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh,
			    &h__[incol + k1 + jcol * h_dim1], ldh);
/* L160: */
		}

/*              ==== Vertical multiply ==== */

		i__4 = max(*ktop,incol) - 1;
		i__3 = *nv;
		for (jrow = jtop; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
			jrow += i__3) {
/* Computing MIN */
		    i__5 = *nv, i__7 = max(*ktop,incol) - jrow;
		    jlen = min(i__5,i__7);
		    _starpu_dgemm_("N", "N", &jlen, &nu, &nu, &c_b8,
			    &h__[jrow + (incol + k1) * h_dim1], ldh,
			    &u[k1 + k1 * u_dim1], ldu,
			    &c_b7, &wv[wv_offset], ldwv);
		    _starpu_dlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv,
			    &h__[jrow + (incol + k1) * h_dim1], ldh);
/* L170: */
		}

/*              ==== Z multiply (also vertical) ==== */

		if (*wantz) {
		    i__3 = *ihiz;
		    i__4 = *nv;
		    for (jrow = *iloz; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
			    jrow += i__4) {
/* Computing MIN */
			i__5 = *nv, i__7 = *ihiz - jrow + 1;
			jlen = min(i__5,i__7);
			_starpu_dgemm_("N", "N", &jlen, &nu, &nu, &c_b8,
				&z__[jrow + (incol + k1) * z_dim1], ldz,
				&u[k1 + k1 * u_dim1], ldu,
				&c_b7, &wv[wv_offset], ldwv);
			_starpu_dlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv,
				&z__[jrow + (incol + k1) * z_dim1], ldz);
/* L180: */
		    }
		}
	    } else {

/*              ==== Updates exploiting U's 2-by-2 block structure. */
/*              .    (I2, I4, J2, J4 are the last rows and columns */
/*              .    of the blocks.) ==== */

		i2 = (kdu + 1) / 2;
		i4 = kdu;
		j2 = i4 - i2;
		j4 = kdu;

/*              ==== KZS and KNZ deal with the band of zeros */
/*              .    along the diagonal of one of the triangular */
/*              .    blocks. ==== */

		kzs = j4 - j2 - (ns + 1);
		knz = ns + 1;
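/*              ==== Added commentary: a concrete instance of the */
/*              .    block sizes: for NS = 6 shifts, KDU = 15 gives */
/*              .    I2 = 8, I4 = 15, J2 = 7, J4 = 15, with */
/*              .    KZS = J4-J2-(NS+1) = 1 zero row/column and */
/*              .    KNZ = NS+1 = 7 referenced rows of the */
/*              .    triangular block. ==== */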
/*              ==== Horizontal multiply ==== */

		i__4 = jbot;
		i__3 = *nh;
		for (jcol = min(ndcol,*kbot) + 1; i__3 < 0 ? jcol >= i__4 :
			jcol <= i__4; jcol += i__3) {
/* Computing MIN */
		    i__5 = *nh, i__7 = jbot - jcol + 1;
		    jlen = min(i__5,i__7);

/*                 ==== Copy bottom of H to top+KZS of scratch ==== */
/*                 .    (The first KZS rows get multiplied by zero.) ==== */

		    _starpu_dlacpy_("ALL", &knz, &jlen, &h__[incol + 1 + j2 +
			    jcol * h_dim1], ldh, &wh[kzs + 1 + wh_dim1], ldwh);

/*                 ==== Multiply by U21' ==== */

		    _starpu_dlaset_("ALL", &kzs, &jlen, &c_b7, &c_b7,
			    &wh[wh_offset], ldwh);
		    _starpu_dtrmm_("L", "U", "C", "N", &knz, &jlen, &c_b8,
			    &u[j2 + 1 + (kzs + 1) * u_dim1], ldu,
			    &wh[kzs + 1 + wh_dim1], ldwh);

/*                 ==== Multiply top of H by U11' ==== */

		    _starpu_dgemm_("C", "N", &i2, &jlen, &j2, &c_b8, &u[u_offset],
			    ldu, &h__[incol + 1 + jcol * h_dim1], ldh,
			    &c_b8, &wh[wh_offset], ldwh);

/*                 ==== Copy top of H to bottom of WH ==== */

		    _starpu_dlacpy_("ALL", &j2, &jlen, &h__[incol + 1 + jcol * h_dim1],
			    ldh, &wh[i2 + 1 + wh_dim1], ldwh);

/*                 ==== Multiply by U21' ==== */

		    _starpu_dtrmm_("L", "L", "C", "N", &j2, &jlen, &c_b8,
			    &u[(i2 + 1) * u_dim1 + 1], ldu,
			    &wh[i2 + 1 + wh_dim1], ldwh);

/*                 ==== Multiply by U22 ==== */

		    i__5 = i4 - i2;
		    i__7 = j4 - j2;
		    _starpu_dgemm_("C", "N", &i__5, &jlen, &i__7, &c_b8,
			    &u[j2 + 1 + (i2 + 1) * u_dim1], ldu,
			    &h__[incol + 1 + j2 + jcol * h_dim1], ldh,
			    &c_b8, &wh[i2 + 1 + wh_dim1], ldwh);

/*                 ==== Copy it back ==== */

		    _starpu_dlacpy_("ALL", &kdu, &jlen, &wh[wh_offset], ldwh,
			    &h__[incol + 1 + jcol * h_dim1], ldh);
/* L190: */
		}
/*              ==== Vertical multiply ==== */

		i__3 = max(incol,*ktop) - 1;
		i__4 = *nv;
		for (jrow = jtop; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
			jrow += i__4) {
/* Computing MIN */
		    i__5 = *nv, i__7 = max(incol,*ktop) - jrow;
		    jlen = min(i__5,i__7);

/*                 ==== Copy right of H to scratch (the first KZS */
/*                 .    columns get multiplied by zero) ==== */

		    _starpu_dlacpy_("ALL", &jlen, &knz, &h__[jrow + (incol + 1 + j2) *
			    h_dim1], ldh, &wv[(kzs + 1) * wv_dim1 + 1], ldwv);

/*                 ==== Multiply by U21 ==== */

		    _starpu_dlaset_("ALL", &jlen, &kzs, &c_b7, &c_b7,
			    &wv[wv_offset], ldwv);
		    _starpu_dtrmm_("R", "U", "N", "N", &jlen, &knz, &c_b8,
			    &u[j2 + 1 + (kzs + 1) * u_dim1], ldu,
			    &wv[(kzs + 1) * wv_dim1 + 1], ldwv);

/*                 ==== Multiply by U11 ==== */

		    _starpu_dgemm_("N", "N", &jlen, &i2, &j2, &c_b8,
			    &h__[jrow + (incol + 1) * h_dim1], ldh,
			    &u[u_offset], ldu, &c_b8, &wv[wv_offset], ldwv);

/*                 ==== Copy left of H to right of scratch ==== */

		    _starpu_dlacpy_("ALL", &jlen, &j2, &h__[jrow + (incol + 1) *
			    h_dim1], ldh, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);

/*                 ==== Multiply by U21 ==== */

		    i__5 = i4 - i2;
		    _starpu_dtrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b8,
			    &u[(i2 + 1) * u_dim1 + 1], ldu,
			    &wv[(i2 + 1) * wv_dim1 + 1], ldwv);

/*                 ==== Multiply by U22 ==== */

		    i__5 = i4 - i2;
		    i__7 = j4 - j2;
		    _starpu_dgemm_("N", "N", &jlen, &i__5, &i__7, &c_b8,
			    &h__[jrow + (incol + 1 + j2) * h_dim1], ldh,
			    &u[j2 + 1 + (i2 + 1) * u_dim1], ldu,
			    &c_b8, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);

/*                 ==== Copy it back ==== */

		    _starpu_dlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv,
			    &h__[jrow + (incol + 1) * h_dim1], ldh);
/* L200: */
		}
/*              ==== Multiply Z (also vertical) ==== */

		if (*wantz) {
		    i__4 = *ihiz;
		    i__3 = *nv;
		    for (jrow = *iloz; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
			    jrow += i__3) {
/* Computing MIN */
			i__5 = *nv, i__7 = *ihiz - jrow + 1;
			jlen = min(i__5,i__7);

/*                    ==== Copy right of Z to left of scratch (first */
/*                    .    KZS columns get multiplied by zero) ==== */

			_starpu_dlacpy_("ALL", &jlen, &knz, &z__[jrow + (incol + 1 +
				j2) * z_dim1], ldz,
				&wv[(kzs + 1) * wv_dim1 + 1], ldwv);

/*                    ==== Multiply by U12 ==== */

			_starpu_dlaset_("ALL", &jlen, &kzs, &c_b7, &c_b7,
				&wv[wv_offset], ldwv);
			_starpu_dtrmm_("R", "U", "N", "N", &jlen, &knz, &c_b8,
				&u[j2 + 1 + (kzs + 1) * u_dim1], ldu,
				&wv[(kzs + 1) * wv_dim1 + 1], ldwv);

/*                    ==== Multiply by U11 ==== */

			_starpu_dgemm_("N", "N", &jlen, &i2, &j2, &c_b8,
				&z__[jrow + (incol + 1) * z_dim1], ldz,
				&u[u_offset], ldu, &c_b8, &wv[wv_offset], ldwv);

/*                    ==== Copy left of Z to right of scratch ==== */

			_starpu_dlacpy_("ALL", &jlen, &j2, &z__[jrow + (incol + 1) *
				z_dim1], ldz, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);

/*                    ==== Multiply by U21 ==== */

			i__5 = i4 - i2;
			_starpu_dtrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b8,
				&u[(i2 + 1) * u_dim1 + 1], ldu,
				&wv[(i2 + 1) * wv_dim1 + 1], ldwv);

/*                    ==== Multiply by U22 ==== */

			i__5 = i4 - i2;
			i__7 = j4 - j2;
			_starpu_dgemm_("N", "N", &jlen, &i__5, &i__7, &c_b8,
				&z__[jrow + (incol + 1 + j2) * z_dim1], ldz,
				&u[j2 + 1 + (i2 + 1) * u_dim1], ldu,
				&c_b8, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);

/*                    ==== Copy the result back to Z ==== */

			_starpu_dlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv,
				&z__[jrow + (incol + 1) * z_dim1], ldz);
/* L210: */
		    }
		}
	    }
	}
/* L220: */
    }

/*     ==== End of DLAQR5 ==== */

    return 0;
} /* _starpu_dlaqr5_ */