  1. /* dgejsv.f -- translated by f2c (version 20061008).
  2. You must link the resulting object file with libf2c:
  3. on Microsoft Windows system, link with libf2c.lib;
  4. on Linux or Unix systems, link with .../path/to/libf2c.a -lm
  5. or, if you install libf2c.a in a standard place, with -lf2c -lm
  6. -- in that order, at the end of the command line, as in
  7. cc *.o -lf2c -lm
  8. Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
  9. http://www.netlib.org/f2c/libf2c.zip
  10. */
  11. #include "f2c.h"
  12. #include "blaswrap.h"
  13. /* Table of constant values */
  14. static integer c__1 = 1;
  15. static doublereal c_b34 = 0.;
  16. static doublereal c_b35 = 1.;
  17. static integer c__0 = 0;
  18. static integer c_n1 = -1;
  19. /* Subroutine */ int _starpu_dgejsv_(char *joba, char *jobu, char *jobv, char *jobr,
  20. char *jobt, char *jobp, integer *m, integer *n, doublereal *a,
  21. integer *lda, doublereal *sva, doublereal *u, integer *ldu,
  22. doublereal *v, integer *ldv, doublereal *work, integer *lwork,
  23. integer *iwork, integer *info)
  24. {
  25. /* System generated locals */
  26. integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
  27. i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10;
  28. doublereal d__1, d__2, d__3, d__4;
  29. /* Builtin functions */
  30. double sqrt(doublereal), log(doublereal), d_sign(doublereal *, doublereal
  31. *);
  32. integer i_dnnt(doublereal *);
  33. /* Local variables */
  34. integer p, q, n1, nr;
  35. doublereal big, xsc, big1;
  36. logical defr;
  37. doublereal aapp, aaqq;
  38. logical kill;
  39. integer ierr;
  40. extern doublereal _starpu_dnrm2_(integer *, doublereal *, integer *);
  41. doublereal temp1;
  42. logical jracc;
  43. extern /* Subroutine */ int _starpu_dscal_(integer *, doublereal *, doublereal *,
  44. integer *);
  45. extern logical _starpu_lsame_(char *, char *);
  46. doublereal small, entra, sfmin;
  47. logical lsvec;
  48. extern /* Subroutine */ int _starpu_dcopy_(integer *, doublereal *, integer *,
  49. doublereal *, integer *), _starpu_dswap_(integer *, doublereal *, integer
  50. *, doublereal *, integer *);
  51. doublereal epsln;
  52. logical rsvec;
  53. extern /* Subroutine */ int _starpu_dtrsm_(char *, char *, char *, char *,
  54. integer *, integer *, doublereal *, doublereal *, integer *,
  55. doublereal *, integer *);
  56. logical l2aber;
  57. extern /* Subroutine */ int _starpu_dgeqp3_(integer *, integer *, doublereal *,
  58. integer *, integer *, doublereal *, doublereal *, integer *,
  59. integer *);
  60. doublereal condr1, condr2, uscal1, uscal2;
  61. logical l2kill, l2rank, l2tran, l2pert;
  62. extern doublereal _starpu_dlamch_(char *);
  63. extern /* Subroutine */ int _starpu_dgelqf_(integer *, integer *, doublereal *,
  64. integer *, doublereal *, doublereal *, integer *, integer *);
  65. extern integer _starpu_idamax_(integer *, doublereal *, integer *);
  66. doublereal scalem;
  67. extern /* Subroutine */ int _starpu_dlascl_(char *, integer *, integer *,
  68. doublereal *, doublereal *, integer *, integer *, doublereal *,
  69. integer *, integer *);
  70. doublereal sconda;
  71. logical goscal;
  72. doublereal aatmin;
  73. extern /* Subroutine */ int _starpu_dgeqrf_(integer *, integer *, doublereal *,
  74. integer *, doublereal *, doublereal *, integer *, integer *);
  75. doublereal aatmax;
  76. extern /* Subroutine */ int _starpu_dlacpy_(char *, integer *, integer *,
  77. doublereal *, integer *, doublereal *, integer *),
  78. _starpu_dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
  79. doublereal *, integer *), _starpu_xerbla_(char *, integer *);
  80. logical noscal;
  81. extern /* Subroutine */ int _starpu_dpocon_(char *, integer *, doublereal *,
  82. integer *, doublereal *, doublereal *, doublereal *, integer *,
  83. integer *), _starpu_dgesvj_(char *, char *, char *, integer *,
  84. integer *, doublereal *, integer *, doublereal *, integer *,
  85. doublereal *, integer *, doublereal *, integer *, integer *), _starpu_dlassq_(integer *, doublereal *, integer
  86. *, doublereal *, doublereal *), _starpu_dlaswp_(integer *, doublereal *,
  87. integer *, integer *, integer *, integer *, integer *);
  88. doublereal entrat;
  89. logical almort;
  90. extern /* Subroutine */ int _starpu_dorgqr_(integer *, integer *, integer *,
  91. doublereal *, integer *, doublereal *, doublereal *, integer *,
  92. integer *), _starpu_dormlq_(char *, char *, integer *, integer *, integer
  93. *, doublereal *, integer *, doublereal *, doublereal *, integer *,
  94. doublereal *, integer *, integer *);
  95. doublereal maxprj;
  96. logical errest;
  97. extern /* Subroutine */ int _starpu_dormqr_(char *, char *, integer *, integer *,
  98. integer *, doublereal *, integer *, doublereal *, doublereal *,
  99. integer *, doublereal *, integer *, integer *);
  100. logical transp, rowpiv;
  101. doublereal cond_ok__;
  102. integer warning, numrank;
  103. /* -- LAPACK routine (version 3.2) -- */
  104. /* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
  105. /* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
  106. /* -- November 2008 -- */
  107. /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
  108. /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
  109. /* This routine is also part of SIGMA (version 1.23, October 23, 2008). */
  110. /* SIGMA is a library of highly accurate algorithms for the */
  111. /* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
  112. /* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
  113. /* -#- Scalar Arguments -#- */
  114. /* -#- Array Arguments -#- */
  115. /* .. */
  116. /* Purpose */
  117. /* ~~~~~~~ */
  118. /* DGEJSV computes the singular value decomposition (SVD) of a real M-by-N */
  119. /* matrix [A], where M >= N. The SVD of [A] is written as */
  120. /* [A] = [U] * [SIGMA] * [V]^t, */
  121. /* where [SIGMA] is an N-by-N (M-by-N) matrix which is zero except for its N */
  122. /* diagonal elements, [U] is an M-by-N (or M-by-M) orthonormal matrix, and */
  123. /* [V] is an N-by-N orthogonal matrix. The diagonal elements of [SIGMA] are */
  124. /* the singular values of [A]. The columns of [U] and [V] are the left and */
  125. /* the right singular vectors of [A], respectively. The matrices [U] and [V] */
  126. /* are computed and stored in the arrays U and V, respectively. The diagonal */
  127. /* of [SIGMA] is computed and stored in the array SVA. */
  128. /* Further details */
  129. /* ~~~~~~~~~~~~~~~ */
  130. /* DGEJSV implements a preconditioned Jacobi SVD algorithm. It uses SGEQP3, */
  131. /* SGEQRF, and SGELQF as preprocessors and preconditioners. Optionally, an */
  132. /* additional row pivoting can be used as a preprocessor, which in some */
  133. /* cases results in much higher accuracy. An example is matrix A with the */
  134. /* structure A = D1 * C * D2, where D1, D2 are arbitrarily ill-conditioned */
  135. /* diagonal matrices and C is a well-conditioned matrix. In that case, complete */
  136. /* pivoting in the first QR factorizations provides accuracy dependent on the */
  137. /* condition number of C, and independent of D1, D2. Such higher accuracy is */
  138. /* not completely understood theoretically, but it works well in practice. */
  139. /* Further, if A can be written as A = B*D, with well-conditioned B and some */
  140. /* diagonal D, then the high accuracy is guaranteed, both theoretically and */
  141. /* in software, independent of D. For more details see [1], [2]. */
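/* (Illustrative example: take D1 = diag(1, 1.0D+100), D2 = diag(1.0D-100, 1) */
/* and a well-conditioned C, e.g. C = [ 1.0 0.5 ; 0.5 1.0 ]. The product */
/* A = D1 * C * D2 has an enormous spectral condition number, yet with */
/* JOBA = 'F' or 'G' the computed singular values retain relative accuracy */
/* governed by cond(C), not by the extreme scalings D1 and D2.) */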
  142. /* The computational range for the singular values can be the full range */
  143. /* ( UNDERFLOW,OVERFLOW ), provided that the machine arithmetic and the BLAS */
  144. /* & LAPACK routines called by DGEJSV are implemented to work in that range. */
  145. /* If that is not the case, then the restriction for safe computation with */
  146. /* the singular values in the range of normalized IEEE numbers is that the */
  147. /* spectral condition number kappa(A)=sigma_max(A)/sigma_min(A) does not */
  148. /* overflow. This code (DGEJSV) is best used in this restricted range, */
  149. /* meaning that singular values of magnitude below ||A||_2 / SLAMCH('O') are */
  150. /* returned as zeros. See JOBR for details on this. */
  151. /* Further, this implementation is somewhat slower than the one described */
  152. /* in [1,2] due to replacement of some non-LAPACK components, and because */
  153. /* the choice of some tuning parameters in the iterative part (DGESVJ) is */
  154. /* left to the implementer on a particular machine. */
  155. /* The rank revealing QR factorization (in this code: SGEQP3) should be */
  156. /* implemented as in [3]. We have a new version of SGEQP3 under development */
  157. /* that is more robust than the current one in LAPACK, with a cleaner cut in */
  158. /* rank deficient cases. It will be available in the SIGMA library [4]. */
  159. /* If M is much larger than N, it is obvious that the initial QRF with */
  160. /* column pivoting can be preprocessed by the QRF without pivoting. That */
  161. /* well known trick is not used in DGEJSV because in some cases heavy row */
  162. /* weighting can be treated with complete pivoting. The overhead in cases */
  163. /* M much larger than N is then only due to pivoting, but the benefits in */
  164. /* terms of accuracy have prevailed. The implementer/user can incorporate */
  165. /* this extra QRF step easily. The implementer can also improve data movement */
  166. /* (matrix transpose, matrix copy, matrix transposed copy) - this */
  167. /* implementation of DGEJSV uses only the simplest, naive data movement. */
  168. /* Contributors */
  169. /* ~~~~~~~~~~~~ */
  170. /* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
  171. /* References */
  172. /* ~~~~~~~~~~ */
  173. /* [1] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
  174. /* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
  175. /* LAPACK Working note 169. */
  176. /* [2] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
  177. /* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
  178. /* LAPACK Working note 170. */
  179. /* [3] Z. Drmac and Z. Bujanovic: On the failure of rank-revealing QR */
  180. /* factorization software - a case study. */
  181. /* ACM Trans. Math. Softw. Vol. 35, No 2 (2008), pp. 1-28. */
  182. /* LAPACK Working note 176. */
  183. /* [4] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
  184. /* QSVD, (H,K)-SVD computations. */
  185. /* Department of Mathematics, University of Zagreb, 2008. */
  186. /* Bugs, examples and comments */
  187. /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  188. /* Please report all bugs and send interesting examples and/or comments to */
  189. /* drmac@math.hr. Thank you. */
  190. /* Arguments */
  191. /* ~~~~~~~~~ */
  192. /* ............................................................................ */
  193. /* . JOBA (input) CHARACTER*1 */
  194. /* . Specifies the level of accuracy: */
  195. /* . = 'C': This option works well (high relative accuracy) if A = B * D, */
  196. /* . with well-conditioned B and arbitrary diagonal matrix D. */
  197. /* . The accuracy cannot be spoiled by COLUMN scaling. The */
  198. /* . accuracy of the computed output depends on the condition of */
  199. /* . B, and the procedure aims at the best theoretical accuracy. */
  200. /* . The relative error max_{i=1:N}|d sigma_i| / sigma_i is */
  201. /* . bounded by f(M,N)*epsilon* cond(B), independent of D. */
  202. /* . The input matrix is preprocessed with the QRF with column */
  203. /* . pivoting. This initial preprocessing and preconditioning by */
  204. /* . a rank revealing QR factorization is common for all values of */
  205. /* . JOBA. Additional actions are specified as follows: */
  206. /* . = 'E': Computation as with 'C' with an additional estimate of the */
  207. /* . condition number of B. It provides a realistic error bound. */
  208. /* . = 'F': If A = D1 * C * D2 with ill-conditioned diagonal scalings */
  209. /* . D1, D2, and well-conditioned matrix C, this option gives */
  210. /* . higher accuracy than the 'C' option. If the structure of the */
  211. /* . input matrix is not known, and relative accuracy is */
  212. /* . desirable, then this option is advisable. The input matrix A */
  213. /* . is preprocessed with QR factorization with FULL (row and */
  214. /* . column) pivoting. */
  215. /* . = 'G': Computation as with 'F' with an additional estimate of the */
  216. /* . condition number of B, where A=D*B. If A has heavily weighted */
  217. /* . rows, then using this condition number gives an overly pessimistic */
  218. /* . error bound. */
  219. /* . = 'A': Small singular values are the noise and the matrix is treated */
  220. /* . as numerically rank deficient. The error in the computed */
  221. /* . singular values is bounded by f(m,n)*epsilon*||A||. */
  222. /* . The computed SVD A = U * S * V^t restores A up to */
  223. /* . f(m,n)*epsilon*||A||. */
  224. /* . This gives the procedure the licence to discard (set to zero) */
  225. /* . all singular values below N*epsilon*||A||. */
  226. /* . = 'R': Similar to 'A'. The rank revealing property of the initial */
  227. /* . QR factorization is used to reveal (using the triangular factor) */
  228. /* . a gap sigma_{r+1} < epsilon * sigma_r in which case the */
  229. /* . numerical RANK is declared to be r. The SVD is computed with */
  230. /* . absolute error bounds, but more accurately than with 'A'. */
  231. /* . */
  232. /* . JOBU (input) CHARACTER*1 */
  233. /* . Specifies whether to compute the columns of U: */
  234. /* . = 'U': N columns of U are returned in the array U. */
  235. /* . = 'F': full set of M left sing. vectors is returned in the array U. */
  236. /* . = 'W': U may be used as workspace of length M*N. See the description */
  237. /* . of U. */
  238. /* . = 'N': U is not computed. */
  239. /* . */
  240. /* . JOBV (input) CHARACTER*1 */
  241. /* . Specifies whether to compute the matrix V: */
  242. /* . = 'V': N columns of V are returned in the array V; Jacobi rotations */
  243. /* . are not explicitly accumulated. */
  244. /* . = 'J': N columns of V are returned in the array V, but they are */
  245. /* . computed as the product of Jacobi rotations. This option is */
  246. /* . allowed only if JOBU .NE. 'N', i.e. in computing the full SVD. */
  247. /* . = 'W': V may be used as workspace of length N*N. See the description */
  248. /* . of V. */
  249. /* . = 'N': V is not computed. */
  250. /* . */
  251. /* . JOBR (input) CHARACTER*1 */
  252. /* . Specifies the RANGE for the singular values. Issues the licence to */
  253. /* . set to zero small positive singular values if they are outside the */
  254. /* . specified range. If A .NE. 0 is scaled so that the largest singular */
  255. /* . value of c*A is around DSQRT(BIG), BIG=SLAMCH('O'), then JOBR issues */
  256. /* . the licence to kill columns of A whose norm in c*A is less than */
  257. /* . DSQRT(SFMIN) (for JOBR.EQ.'R'), or less than SMALL=SFMIN/EPSLN, */
  258. /* . where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). */
  259. /* . = 'N': Do not kill small columns of c*A. This option assumes that */
  260. /* . BLAS and QR factorizations and triangular solvers are */
  261. /* . implemented to work in that range. If the condition of A */
  262. /* . is greater than BIG, use DGESVJ. */
  263. /* . = 'R': RESTRICTED range for sigma(c*A) is [DSQRT(SFMIN), DSQRT(BIG)] */
  264. /* . (roughly, as described above). This option is recommended. */
  265. /* . ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  266. /* . For computing the singular values in the FULL range [SFMIN,BIG] */
  267. /* . use DGESVJ. */
  268. /* . */
  269. /* . JOBT (input) CHARACTER*1 */
  270. /* . If the matrix is square, then the procedure may decide to use the */
  271. /* . transposed A if A^t seems better with respect to convergence. */
  272. /* . If the matrix is not square, JOBT is ignored. This is subject to */
  273. /* . changes in the future. */
  274. /* . The decision is based on two values of entropy over the adjoint */
  275. /* . orbit of A^t * A. See the descriptions of WORK(6) and WORK(7). */
  276. /* . = 'T': transpose if entropy test indicates possibly faster */
  277. /* . convergence of Jacobi process if A^t is taken as input. If A is */
  278. /* . replaced with A^t, then the row pivoting is included automatically. */
  279. /* . = 'N': do not speculate. */
  280. /* . This option can be used to compute only the singular values, or the */
  281. /* . full SVD (U, SIGMA and V). For only one set of singular vectors */
  282. /* . (U or V), the caller should provide both U and V, as one of the */
  283. /* . matrices is used as workspace if the matrix A is transposed. */
  284. /* . The implementer can easily remove this constraint and make the */
  285. /* . code more complicated. See the descriptions of U and V. */
  286. /* . */
  287. /* . JOBP (input) CHARACTER*1 */
  288. /* . Issues the licence to introduce structured perturbations to drown */
  289. /* . denormalized numbers. This licence should be active if the */
  290. /* . denormals are poorly implemented, causing slow computation, */
  291. /* . especially in cases of fast convergence (!). For details see [1,2]. */
  292. /* . For the sake of simplicity, these perturbations are included only */
  293. /* . when the full SVD or only the singular values are requested. The */
  294. /* . implementer/user can easily add the perturbation for the cases of */
  295. /* . computing one set of singular vectors. */
  296. /* . = 'P': introduce perturbation */
  297. /* . = 'N': do not perturb */
  298. /* ............................................................................ */
  299. /* M (input) INTEGER */
  300. /* The number of rows of the input matrix A. M >= 0. */
  301. /* N (input) INTEGER */
  302. /* The number of columns of the input matrix A. M >= N >= 0. */
  303. /* A (input/workspace) REAL array, dimension (LDA,N) */
  304. /* On entry, the M-by-N matrix A. */
  305. /* LDA (input) INTEGER */
  306. /* The leading dimension of the array A. LDA >= max(1,M). */
  307. /* SVA (workspace/output) REAL array, dimension (N) */
  308. /* On exit, */
  309. /* - For WORK(1)/WORK(2) = ONE: The singular values of A. During the */
  310. /* computation SVA contains Euclidean column norms of the */
  311. /* iterated matrices in the array A. */
  312. /* - For WORK(1) .NE. WORK(2): The singular values of A are */
  313. /* (WORK(1)/WORK(2)) * SVA(1:N). This factored form is used if */
  314. /* sigma_max(A) overflows or if small singular values have been */
  315. /* saved from underflow by scaling the input matrix A. */
  316. /* - If JOBR='R' then some of the singular values may be returned */
  317. /* as exact zeros obtained by "set to zero" because they are */
  318. /* below the numerical rank threshold or are denormalized numbers. */
  319. /* U (workspace/output) REAL array, dimension ( LDU, N ) */
  320. /* If JOBU = 'U', then U contains on exit the M-by-N matrix of */
  321. /* the left singular vectors. */
  322. /* If JOBU = 'F', then U contains on exit the M-by-M matrix of */
  323. /* the left singular vectors, including an ONB */
  324. /* of the orthogonal complement of the Range(A). */
  325. /* If JOBU = 'W' .AND. (JOBV.EQ.'V' .AND. JOBT.EQ.'T' .AND. M.EQ.N), */
  326. /* then U is used as workspace if the procedure */
  327. /* replaces A with A^t. In that case, [V] is computed */
  328. /* in U as left singular vectors of A^t and then */
  329. /* copied back to the V array. This 'W' option is just */
  330. /* a reminder to the caller that in this case U is */
  331. /* reserved as workspace of length N*N. */
  332. /* If JOBU = 'N' U is not referenced. */
  333. /* LDU (input) INTEGER */
  334. /* The leading dimension of the array U, LDU >= 1. */
  335. /* IF JOBU = 'U' or 'F' or 'W', then LDU >= M. */
  336. /* V (workspace/output) REAL array, dimension ( LDV, N ) */
  337. /* If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of */
  338. /* the right singular vectors; */
  339. /* If JOBV = 'W' .AND. (JOBU.EQ.'U' .AND. JOBT.EQ.'T' .AND. M.EQ.N), */
  340. /* then V is used as workspace if the procedure */
  341. /* replaces A with A^t. In that case, [U] is computed */
  342. /* in V as right singular vectors of A^t and then */
  343. /* copied back to the U array. This 'W' option is just */
  344. /* a reminder to the caller that in this case V is */
  345. /* reserved as workspace of length N*N. */
  346. /* If JOBV = 'N' V is not referenced. */
  347. /* LDV (input) INTEGER */
  348. /* The leading dimension of the array V, LDV >= 1. */
  349. /* If JOBV = 'V' or 'J' or 'W', then LDV >= N. */
  350. /* WORK (workspace/output) REAL array, dimension at least LWORK. */
  351. /* On exit, */
  352. /* WORK(1) = SCALE = WORK(2) / WORK(1) is the scaling factor such */
  353. /* that SCALE*SVA(1:N) are the computed singular values */
  354. /* of A. (See the description of SVA().) */
  355. /* WORK(2) = See the description of WORK(1). */
  356. /* WORK(3) = SCONDA is an estimate for the condition number of */
  357. /* column equilibrated A. (If JOBA .EQ. 'E' or 'G') */
  358. /* SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). */
  359. /* It is computed using DPOCON. It holds */
  360. /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
  361. /* where R is the triangular factor from the QRF of A. */
  362. /* However, if R is truncated and the numerical rank is */
  363. /* determined to be strictly smaller than N, SCONDA is */
  364. /* returned as -1, thus indicating that the smallest */
  365. /* singular values might be lost. */
  366. /* If full SVD is needed, the following two condition numbers are */
  367. /* useful for the analysis of the algorithm. They are provided for */
  368. /* a developer/implementer who is familiar with the details of */
  369. /* the method. */
  370. /* WORK(4) = an estimate of the scaled condition number of the */
  371. /* triangular factor in the first QR factorization. */
  372. /* WORK(5) = an estimate of the scaled condition number of the */
  373. /* triangular factor in the second QR factorization. */
  374. /* The following two parameters are computed if JOBT .EQ. 'T'. */
  375. /* They are provided for a developer/implementer who is familiar */
  376. /* with the details of the method. */
  377. /* WORK(6) = the entropy of A^t*A :: this is the Shannon entropy */
  378. /* of diag(A^t*A) / Trace(A^t*A) taken as point in the */
  379. /* probability simplex. */
  380. /* WORK(7) = the entropy of A*A^t. */
  381. /* LWORK (input) INTEGER */
  382. /* Length of WORK to confirm proper allocation of work space. */
  383. /* LWORK depends on the job: */
  384. /* If only SIGMA is needed ( JOBU.EQ.'N', JOBV.EQ.'N' ) and */
  385. /* -> .. no scaled condition estimate required ( JOBA.NE.'E' .AND. JOBA.NE.'G' ): */
  386. /* LWORK >= max(2*M+N,4*N+1,7). This is the minimal requirement. */
  387. /* For optimal performance (blocked code) the optimal value */
  388. /* is LWORK >= max(2*M+N,3*N+(N+1)*NB,7). Here NB is the optimal */
  389. /* block size for xGEQP3/xGEQRF. */
  390. /* -> .. an estimate of the scaled condition number of A is */
  391. /* required (JOBA='E', 'G'). In this case, LWORK is the maximum */
  392. /* of the above and N*N+4*N, i.e. LWORK >= max(2*M+N,N*N+4*N,7). */
  393. /* If SIGMA and the right singular vectors are needed (JOBV.EQ.'V'), */
  394. /* -> the minimal requirement is LWORK >= max(2*N+M,7). */
  395. /* -> For optimal performance, LWORK >= max(2*N+M,2*N+N*NB,7), */
  396. /* where NB is the optimal block size. */
  397. /* If SIGMA and the left singular vectors are needed */
  398. /* -> the minimal requirement is LWORK >= max(2*N+M,7). */
  399. /* -> For optimal performance, LWORK >= max(2*N+M,2*N+N*NB,7), */
  400. /* where NB is the optimal block size. */
  401. /* If full SVD is needed ( JOBU.EQ.'U' or 'F', JOBV.EQ.'V' ) and */
  402. /* -> .. the singular vectors are computed without explicit */
  403. /* accumulation of the Jacobi rotations, LWORK >= 6*N+2*N*N */
  404. /* -> .. in the iterative part, the Jacobi rotations are */
  405. /* explicitly accumulated (option, see the description of JOBV), */
  406. /* then the minimal requirement is LWORK >= max(M+3*N+N*N,7). */
  407. /* For better performance, if NB is the optimal block size, */
  408. /* LWORK >= max(3*N+N*N+M,3*N+N*N+N*NB,7). */
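/* A minimal caller sketch for the simplest case above (singular values */
/* only, JOBU='N', JOBV='N', no condition estimate). The variable names */
/* and the use of malloc are illustrative; only the argument list and the */
/* workspace formulas are taken from this description (error checking and */
/* the stdlib.h include are omitted): */
/* */
/*     integer m = 100, n = 50, lda = m, ldu = 1, ldv = 1, lwork, info; */
/*     integer *iwork  = malloc((m + 3 * n) * sizeof(integer)); */
/*     doublereal *a   = malloc((size_t) lda * n * sizeof(doublereal)); */
/*     doublereal *sva = malloc(n * sizeof(doublereal)); */
/*     doublereal udum, vdum, *work; */
/*     lwork = 2 * m + n; */
/*     if (lwork < 4 * n + 1) lwork = 4 * n + 1; */
/*     if (lwork < 7) lwork = 7; */
/*     work = malloc(lwork * sizeof(doublereal)); */
/*     (fill a with the M-by-N input matrix, column-major, leading dim lda) */
/*     _starpu_dgejsv_("C", "N", "N", "R", "N", "N", &m, &n, a, &lda, sva, */
/*         &udum, &ldu, &vdum, &ldv, work, &lwork, iwork, &info); */
/* */
/* Since JOBU and JOBV are 'N', U and V are not referenced and dummy */
/* scalars with LDU = LDV = 1 suffice; the singular values are recovered */
/* from SVA and WORK(1:2) as described above. */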
  409. /* IWORK (workspace/output) INTEGER array, dimension M+3*N. */
  410. /* On exit, */
  411. /* IWORK(1) = the numerical rank determined after the initial */
  412. /* QR factorization with pivoting. See the descriptions */
  413. /* of JOBA and JOBR. */
  414. /* IWORK(2) = the number of the computed nonzero singular values */
  415. /* IWORK(3) = if nonzero, a warning message: */
  416. /* If IWORK(3).EQ.1 then some of the column norms of A */
  417. /* were denormalized floats. The requested high accuracy */
  418. /* is not warranted by the data. */
  419. /* INFO (output) INTEGER */
  420. /* < 0 : if INFO = -i, then the i-th argument had an illegal value. */
  421. /* = 0 : successful exit; */
  422. /* > 0 : DGEJSV did not converge in the maximal allowed number */
  423. /* of sweeps. The computed values may be inaccurate. */
  424. /* ............................................................................ */
  425. /* Local Parameters: */
  426. /* Local Scalars: */
  427. /* Intrinsic Functions: */
  428. /* External Functions: */
  429. /* External Subroutines ( BLAS, LAPACK ): */
  430. /* ............................................................................ */
  431. /* Test the input arguments */
  432. /* Parameter adjustments */
  433. --sva;
  434. a_dim1 = *lda;
  435. a_offset = 1 + a_dim1;
  436. a -= a_offset;
  437. u_dim1 = *ldu;
  438. u_offset = 1 + u_dim1;
  439. u -= u_offset;
  440. v_dim1 = *ldv;
  441. v_offset = 1 + v_dim1;
  442. v -= v_offset;
  443. --work;
  444. --iwork;
  445. /* Function Body */
  446. lsvec = _starpu_lsame_(jobu, "U") || _starpu_lsame_(jobu, "F");
  447. jracc = _starpu_lsame_(jobv, "J");
  448. rsvec = _starpu_lsame_(jobv, "V") || jracc;
  449. rowpiv = _starpu_lsame_(joba, "F") || _starpu_lsame_(joba, "G");
  450. l2rank = _starpu_lsame_(joba, "R");
  451. l2aber = _starpu_lsame_(joba, "A");
  452. errest = _starpu_lsame_(joba, "E") || _starpu_lsame_(joba, "G");
  453. l2tran = _starpu_lsame_(jobt, "T");
  454. l2kill = _starpu_lsame_(jobr, "R");
  455. defr = _starpu_lsame_(jobr, "N");
  456. l2pert = _starpu_lsame_(jobp, "P");
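/* Decoded options: LSVEC/RSVEC request the left/right singular vectors, */
/* JRACC requests V as an accumulated product of Jacobi rotations, */
/* ROWPIV requests the additional row pivoting (JOBA='F' or 'G'), */
/* L2RANK/L2ABER select the rank-revealing/absolute-error treatment of */
/* small singular values, ERREST requests the condition estimate SCONDA, */
/* L2TRAN allows the use of A^t for square A, L2KILL/DEFR decode JOBR, */
/* and L2PERT allows the structured perturbation of denormals (JOBP='P'). */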
  457. if (! (rowpiv || l2rank || l2aber || errest || _starpu_lsame_(joba, "C"))) {
  458. *info = -1;
  459. } else if (! (lsvec || _starpu_lsame_(jobu, "N") || _starpu_lsame_(
  460. jobu, "W"))) {
  461. *info = -2;
  462. } else if (! (rsvec || _starpu_lsame_(jobv, "N") || _starpu_lsame_(
  463. jobv, "W")) || jracc && ! lsvec) {
  464. *info = -3;
  465. } else if (! (l2kill || defr)) {
  466. *info = -4;
  467. } else if (! (l2tran || _starpu_lsame_(jobt, "N"))) {
  468. *info = -5;
  469. } else if (! (l2pert || _starpu_lsame_(jobp, "N"))) {
  470. *info = -6;
  471. } else if (*m < 0) {
  472. *info = -7;
  473. } else if (*n < 0 || *n > *m) {
  474. *info = -8;
  475. } else if (*lda < *m) {
  476. *info = -10;
  477. } else if (lsvec && *ldu < *m) {
  478. *info = -13;
  479. } else if (rsvec && *ldv < *n) {
  480. *info = -14;
  481. } else /* if(complicated condition) */ {
  482. /* Computing MAX */
  483. i__1 = 7, i__2 = (*n << 2) + 1, i__1 = max(i__1,i__2), i__2 = (*m <<
  484. 1) + *n;
  485. /* Computing MAX */
  486. i__3 = 7, i__4 = (*n << 2) + *n * *n, i__3 = max(i__3,i__4), i__4 = (*
  487. m << 1) + *n;
  488. /* Computing MAX */
  489. i__5 = 7, i__6 = (*n << 1) + *m;
  490. /* Computing MAX */
  491. i__7 = 7, i__8 = (*n << 1) + *m;
  492. /* Computing MAX */
  493. i__9 = 7, i__10 = *m + *n * 3 + *n * *n;
  494. if (! (lsvec || rsvec || errest) && *lwork < max(i__1,i__2) || ! (
  495. lsvec || lsvec) && errest && *lwork < max(i__3,i__4) || lsvec
  496. && ! rsvec && *lwork < max(i__5,i__6) || rsvec && ! lsvec && *
  497. lwork < max(i__7,i__8) || lsvec && rsvec && ! jracc && *lwork
  498. < *n * 6 + (*n << 1) * *n || lsvec && rsvec && jracc && *
  499. lwork < max(i__9,i__10)) {
  500. *info = -17;
  501. } else {
  502. /* #:) */
  503. *info = 0;
  504. }
  505. }
  506. if (*info != 0) {
  507. /* #:( */
  508. i__1 = -(*info);
  509. _starpu_xerbla_("DGEJSV", &i__1);
  510. }
  511. /* Quick return for void matrix (Y3K safe) */
  512. /* #:) */
  513. if (*m == 0 || *n == 0) {
  514. return 0;
  515. }
  516. /* Determine whether the matrix U should be M x N or M x M */
  517. if (lsvec) {
  518. n1 = *n;
  519. if (_starpu_lsame_(jobu, "F")) {
  520. n1 = *m;
  521. }
  522. }
  523. /* Set numerical parameters */
  524. /* ! NOTE: Make sure DLAMCH() does not fail on the target architecture. */
  525. epsln = _starpu_dlamch_("Epsilon");
  526. sfmin = _starpu_dlamch_("SafeMinimum");
  527. small = sfmin / epsln;
  528. big = _starpu_dlamch_("O");
  529. /* BIG = ONE / SFMIN */
  530. /* Initialize SVA(1:N) = diag( ||A e_i||_2 )_1^N */
  531. /* (!) If necessary, scale SVA() to protect the largest norm from */
  532. /* overflow. It is possible that this scaling pushes the smallest */
  533. /* column norm left from the underflow threshold (extreme case). */
  534. scalem = 1. / sqrt((doublereal) (*m) * (doublereal) (*n));
  535. noscal = TRUE_;
  536. goscal = TRUE_;
  537. i__1 = *n;
  538. for (p = 1; p <= i__1; ++p) {
  539. aapp = 0.;
  540. aaqq = 0.;
  541. _starpu_dlassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
  542. if (aapp > big) {
  543. *info = -9;
  544. i__2 = -(*info);
  545. _starpu_xerbla_("DGEJSV", &i__2);
  546. return 0;
  547. }
  548. aaqq = sqrt(aaqq);
  549. if (aapp < big / aaqq && noscal) {
  550. sva[p] = aapp * aaqq;
  551. } else {
  552. noscal = FALSE_;
  553. sva[p] = aapp * (aaqq * scalem);
  554. if (goscal) {
  555. goscal = FALSE_;
  556. i__2 = p - 1;
  557. _starpu_dscal_(&i__2, &scalem, &sva[1], &c__1);
  558. }
  559. }
  560. /* L1874: */
  561. }
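/* On exit from this loop, SVA(p) = ||A(:,p)||_2 (DLASSQ returns the pair */
/* (scale, sumsq) with norm = scale*sqrt(sumsq)); if any of these products */
/* threatened to overflow, all entries of SVA carry the common factor */
/* SCALEM = 1/sqrt(M*N), and NOSCAL records whether that rescue was used. */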
  562. if (noscal) {
  563. scalem = 1.;
  564. }
  565. aapp = 0.;
  566. aaqq = big;
  567. i__1 = *n;
  568. for (p = 1; p <= i__1; ++p) {
  569. /* Computing MAX */
  570. d__1 = aapp, d__2 = sva[p];
  571. aapp = max(d__1,d__2);
  572. if (sva[p] != 0.) {
  573. /* Computing MIN */
  574. d__1 = aaqq, d__2 = sva[p];
  575. aaqq = min(d__1,d__2);
  576. }
  577. /* L4781: */
  578. }
  579. /* Quick return for zero M x N matrix */
  580. /* #:) */
  581. if (aapp == 0.) {
  582. if (lsvec) {
  583. _starpu_dlaset_("G", m, &n1, &c_b34, &c_b35, &u[u_offset], ldu)
  584. ;
  585. }
  586. if (rsvec) {
  587. _starpu_dlaset_("G", n, n, &c_b34, &c_b35, &v[v_offset], ldv);
  588. }
  589. work[1] = 1.;
  590. work[2] = 1.;
  591. if (errest) {
  592. work[3] = 1.;
  593. }
  594. if (lsvec && rsvec) {
  595. work[4] = 1.;
  596. work[5] = 1.;
  597. }
  598. if (l2tran) {
  599. work[6] = 0.;
  600. work[7] = 0.;
  601. }
  602. iwork[1] = 0;
  603. iwork[2] = 0;
  604. return 0;
  605. }
  606. /* Issue warning if denormalized column norms detected. Override the */
  607. /* high relative accuracy request. Issue licence to kill columns */
  608. /* (set them to zero) whose norm is less than sigma_max / BIG (roughly). */
  609. /* #:( */
  610. warning = 0;
  611. if (aaqq <= sfmin) {
  612. l2rank = TRUE_;
  613. l2kill = TRUE_;
  614. warning = 1;
  615. }
  616. /* Quick return for one-column matrix */
  617. /* #:) */
  618. if (*n == 1) {
  619. if (lsvec) {
  620. _starpu_dlascl_("G", &c__0, &c__0, &sva[1], &scalem, m, &c__1, &a[a_dim1
  621. + 1], lda, &ierr);
  622. _starpu_dlacpy_("A", m, &c__1, &a[a_offset], lda, &u[u_offset], ldu);
  623. /* computing all M left singular vectors of the M x 1 matrix */
  624. if (n1 != *n) {
  625. i__1 = *lwork - *n;
  626. _starpu_dgeqrf_(m, n, &u[u_offset], ldu, &work[1], &work[*n + 1], &
  627. i__1, &ierr);
  628. i__1 = *lwork - *n;
  629. _starpu_dorgqr_(m, &n1, &c__1, &u[u_offset], ldu, &work[1], &work[*n
  630. + 1], &i__1, &ierr);
  631. _starpu_dcopy_(m, &a[a_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
  632. }
  633. }
  634. if (rsvec) {
  635. v[v_dim1 + 1] = 1.;
  636. }
  637. if (sva[1] < big * scalem) {
  638. sva[1] /= scalem;
  639. scalem = 1.;
  640. }
  641. work[1] = 1. / scalem;
  642. work[2] = 1.;
  643. if (sva[1] != 0.) {
  644. iwork[1] = 1;
  645. if (sva[1] / scalem >= sfmin) {
  646. iwork[2] = 1;
  647. } else {
  648. iwork[2] = 0;
  649. }
  650. } else {
  651. iwork[1] = 0;
  652. iwork[2] = 0;
  653. }
  654. if (errest) {
  655. work[3] = 1.;
  656. }
  657. if (lsvec && rsvec) {
  658. work[4] = 1.;
  659. work[5] = 1.;
  660. }
  661. if (l2tran) {
  662. work[6] = 0.;
  663. work[7] = 0.;
  664. }
  665. return 0;
  666. }
  667. transp = FALSE_;
  668. l2tran = l2tran && *m == *n;
  669. aatmax = -1.;
  670. aatmin = big;
  671. if (rowpiv || l2tran) {
  672. /* Compute the row norms, needed to determine row pivoting sequence */
  673. /* (in the case of heavily row weighted A, row pivoting is strongly */
  674. /* advised) and to collect information needed to compare the */
  675. /* structures of A * A^t and A^t * A (in the case L2TRAN.EQ..TRUE.). */
  676. if (l2tran) {
  677. i__1 = *m;
  678. for (p = 1; p <= i__1; ++p) {
  679. xsc = 0.;
  680. temp1 = 0.;
  681. _starpu_dlassq_(n, &a[p + a_dim1], lda, &xsc, &temp1);
  682. /* DLASSQ gets both the ell_2 and the ell_infinity norm */
  683. /* in one pass through the vector */
  684. work[*m + *n + p] = xsc * scalem;
  685. work[*n + p] = xsc * (scalem * sqrt(temp1));
  686. /* Computing MAX */
  687. d__1 = aatmax, d__2 = work[*n + p];
  688. aatmax = max(d__1,d__2);
  689. if (work[*n + p] != 0.) {
  690. /* Computing MIN */
  691. d__1 = aatmin, d__2 = work[*n + p];
  692. aatmin = min(d__1,d__2);
  693. }
  694. /* L1950: */
  695. }
  696. } else {
  697. i__1 = *m;
  698. for (p = 1; p <= i__1; ++p) {
  699. work[*m + *n + p] = scalem * (d__1 = a[p + _starpu_idamax_(n, &a[p +
  700. a_dim1], lda) * a_dim1], abs(d__1));
  701. /* Computing MAX */
  702. d__1 = aatmax, d__2 = work[*m + *n + p];
  703. aatmax = max(d__1,d__2);
  704. /* Computing MIN */
  705. d__1 = aatmin, d__2 = work[*m + *n + p];
  706. aatmin = min(d__1,d__2);
  707. /* L1904: */
  708. }
  709. }
  710. }
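/* If ROWPIV or L2TRAN was requested, WORK(M+N+1:M+N+M) now holds the */
/* (scaled) ell_infinity norms of the rows of A, used for the optional */
/* row pivoting below; when L2TRAN is set, WORK(N+1:N+M) additionally */
/* holds the scaled Euclidean row norms used by the entropy test below. */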
  711. /* For square matrix A try to determine whether A^t would be better */
  712. /* input for the preconditioned Jacobi SVD, with faster convergence. */
  713. /* The decision is based on an O(N) function of the vector of column */
  714. /* and row norms of A, based on the Shannon entropy. This should give */
  715. /* the right choice in most cases when the difference actually matters. */
  716. /* It may fail and pick the slower converging side. */
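/* The entropy computed below is H = -(1/log N) * sum_p pp*log(pp) with */
/* pp = SVA(p)^2 / (sum_q SVA(q)^2), i.e. the normalized Shannon entropy */
/* of diag(A^t*A)/trace(A^t*A); ENTRAT is the analogous quantity built */
/* from the row norms stored in WORK(N+1:N+M), i.e. from diag(A*A^t). */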
  717. entra = 0.;
  718. entrat = 0.;
  719. if (l2tran) {
  720. xsc = 0.;
  721. temp1 = 0.;
  722. _starpu_dlassq_(n, &sva[1], &c__1, &xsc, &temp1);
  723. temp1 = 1. / temp1;
  724. entra = 0.;
  725. i__1 = *n;
  726. for (p = 1; p <= i__1; ++p) {
  727. /* Computing 2nd power */
  728. d__1 = sva[p] / xsc;
  729. big1 = d__1 * d__1 * temp1;
  730. if (big1 != 0.) {
  731. entra += big1 * log(big1);
  732. }
  733. /* L1113: */
  734. }
  735. entra = -entra / log((doublereal) (*n));
  736. /* Now, SVA().^2/Trace(A^t * A) is a point in the probability simplex. */
  737. /* It is derived from the diagonal of A^t * A. Do the same with the */
  738. /* diagonal of A * A^t, compute the entropy of the corresponding */
  739. /* probability distribution. Note that A * A^t and A^t * A have the */
  740. /* same trace. */
  741. entrat = 0.;
  742. i__1 = *n + *m;
  743. for (p = *n + 1; p <= i__1; ++p) {
  744. /* Computing 2nd power */
  745. d__1 = work[p] / xsc;
  746. big1 = d__1 * d__1 * temp1;
  747. if (big1 != 0.) {
  748. entrat += big1 * log(big1);
  749. }
  750. /* L1114: */
  751. }
  752. entrat = -entrat / log((doublereal) (*m));
  753. /* Analyze the entropies and decide A or A^t. Smaller entropy */
  754. /* usually means better input for the algorithm. */
  755. transp = entrat < entra;
  756. /* If A^t is better than A, transpose A. */
  757. if (transp) {
  758. /* In an optimal implementation, this trivial transpose */
  759. /* should be replaced with faster transpose. */
  760. i__1 = *n - 1;
  761. for (p = 1; p <= i__1; ++p) {
  762. i__2 = *n;
  763. for (q = p + 1; q <= i__2; ++q) {
  764. temp1 = a[q + p * a_dim1];
  765. a[q + p * a_dim1] = a[p + q * a_dim1];
  766. a[p + q * a_dim1] = temp1;
  767. /* L1116: */
  768. }
  769. /* L1115: */
  770. }
  771. i__1 = *n;
  772. for (p = 1; p <= i__1; ++p) {
  773. work[*m + *n + p] = sva[p];
  774. sva[p] = work[*n + p];
  775. /* L1117: */
  776. }
  777. temp1 = aapp;
  778. aapp = aatmax;
  779. aatmax = temp1;
  780. temp1 = aaqq;
  781. aaqq = aatmin;
  782. aatmin = temp1;
  783. kill = lsvec;
  784. lsvec = rsvec;
  785. rsvec = kill;
  786. rowpiv = TRUE_;
  787. }
  788. }
  789. /* END IF L2TRAN */
  790. /* Scale the matrix so that its maximal singular value remains less */
  791. /* than DSQRT(BIG) -- the matrix is scaled so that its maximal column */
  792. /* has Euclidean norm equal to DSQRT(BIG/N). The only reason to keep */
  793. /* DSQRT(BIG) instead of BIG is the fact that DGEJSV uses LAPACK and */
  794. /* BLAS routines that, in some implementations, are not capable of */
  795. /* working in the full interval [SFMIN,BIG] and that they may provoke */
  796. /* overflows in the intermediate results. If the singular values spread */
  797. /* from SFMIN to BIG, then DGESVJ will compute them. So, in that case, */
  798. /* one should use DGESVJ instead of DGEJSV. */
  799. big1 = sqrt(big);
  800. temp1 = sqrt(big / (doublereal) (*n));
  801. _starpu_dlascl_("G", &c__0, &c__0, &aapp, &temp1, n, &c__1, &sva[1], n, &ierr);
  802. if (aaqq > aapp * sfmin) {
  803. aaqq = aaqq / aapp * temp1;
  804. } else {
  805. aaqq = aaqq * temp1 / aapp;
  806. }
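/* Both branches above evaluate AAQQ*TEMP1/AAPP: when AAQQ/AAPP is safely */
/* above SFMIN the quotient is formed first (it cannot underflow and, */
/* being at most one, cannot overflow when multiplied by TEMP1); otherwise */
/* the tiny AAQQ is first scaled up by TEMP1 and only then divided by AAPP. */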
  807. temp1 *= scalem;
  808. _starpu_dlascl_("G", &c__0, &c__0, &aapp, &temp1, m, n, &a[a_offset], lda, &ierr);
  809. /* To undo scaling at the end of this procedure, multiply the */
  810. /* computed singular values with USCAL2 / USCAL1. */
  811. uscal1 = temp1;
  812. uscal2 = aapp;
  813. if (l2kill) {
  814. /* L2KILL enforces computation of nonzero singular values in */
  815. /* the restricted range of condition number of the initial A, */
  816. /* sigma_max(A) / sigma_min(A) approx. DSQRT(BIG)/DSQRT(SFMIN). */
  817. xsc = sqrt(sfmin);
  818. } else {
  819. xsc = small;
  820. /* Now, if the condition number of A is too big, */
  821. /* sigma_max(A) / sigma_min(A) .GT. DSQRT(BIG/N) * EPSLN / SFMIN, */
  822. /* as a precautionary measure, the full SVD is computed using DGESVJ */
  823. /* with accumulated Jacobi rotations. This provides numerically */
  824. /* more robust computation, at the cost of slightly increased run */
  825. /* time. Depending on the concrete implementation of BLAS and LAPACK */
  826. /* (i.e. how they behave in the presence of extreme ill-conditioning) the */
  827. /* implementer may decide to remove this switch. */
  828. if (aaqq < sqrt(sfmin) && lsvec && rsvec) {
  829. jracc = TRUE_;
  830. }
  831. }
  832. if (aaqq < xsc) {
  833. i__1 = *n;
  834. for (p = 1; p <= i__1; ++p) {
  835. if (sva[p] < xsc) {
  836. _starpu_dlaset_("A", m, &c__1, &c_b34, &c_b34, &a[p * a_dim1 + 1],
  837. lda);
  838. sva[p] = 0.;
  839. }
  840. /* L700: */
  841. }
  842. }
  843. /* Preconditioning using QR factorization with pivoting */
  844. if (rowpiv) {
  845. /* Optional row permutation (Bjoerck row pivoting): */
  846. /* A result by Cox and Higham shows that the Bjoerck's */
  847. /* row pivoting combined with standard column pivoting */
  848. /* has similar effect as Powell-Reid complete pivoting. */
  849. /* The ell-infinity norms of A are made nonincreasing. */
  850. i__1 = *m - 1;
  851. for (p = 1; p <= i__1; ++p) {
  852. i__2 = *m - p + 1;
  853. q = _starpu_idamax_(&i__2, &work[*m + *n + p], &c__1) + p - 1;
  854. iwork[(*n << 1) + p] = q;
  855. if (p != q) {
  856. temp1 = work[*m + *n + p];
  857. work[*m + *n + p] = work[*m + *n + q];
  858. work[*m + *n + q] = temp1;
  859. }
  860. /* L1952: */
  861. }
  862. i__1 = *m - 1;
  863. _starpu_dlaswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[(*n << 1) + 1], &
  864. c__1);
  865. }
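/* The row interchanges just applied are recorded in IWORK(2*N+1:2*N+M-1) */
/* so that the same permutation can be undone on the rows of the left */
/* singular vectors once they have been computed. */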
  866. /* End of the preparation phase (scaling, optional sorting and */
  867. /* transposing, optional flushing of small columns). */
  868. /* Preconditioning */
  869. /* If the full SVD is needed, the right singular vectors are computed */
  870. /* from a matrix equation, and for that we need theoretical analysis */
  871. /* of the Businger-Golub pivoting. So we use DGEQP3 as the first RR QRF. */
  872. /* In all other cases the first RR QRF can be chosen by other criteria */
  873. /* (eg speed by replacing global with restricted window pivoting, such */
  874. /* as in SGEQPX from TOMS # 782). Good results will be obtained using */
  875. /* SGEQPX with properly (!) chosen numerical parameters. */
  876. /* Any improvement of DGEQP3 improves the overall performance of DGEJSV. */
  877. /* A * P1 = Q1 * [ R1^t 0]^t: */
  878. i__1 = *n;
  879. for (p = 1; p <= i__1; ++p) {
  880. /* .. all columns are free columns */
  881. iwork[p] = 0;
  882. /* L1963: */
  883. }
  884. i__1 = *lwork - *n;
  885. _starpu_dgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &work[1], &work[*n + 1], &
  886. i__1, &ierr);
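/* On return from DGEQP3: the upper triangle of A(1:N,1:N) holds R1, the */
/* entries below the diagonal hold the Householder vectors of Q1, the */
/* column permutation P1 is recorded in IWORK(1:N), and the scalar */
/* factors tau of the elementary reflectors are in WORK(1:N). */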
  887. /* The upper triangular matrix R1 from the first QRF is inspected for */
  888. /* rank deficiency and possibilities for deflation, or possible */
  889. /* ill-conditioning. Depending on the user specified flag L2RANK, */
  890. /* the procedure explores possibilities to reduce the numerical */
  891. /* rank by inspecting the computed upper triangular factor. If */
  892. /* L2RANK or L2ABER are up, then DGEJSV will compute the SVD of */
  893. /* A + dA, where ||dA|| <= f(M,N)*EPSLN. */
  894. nr = 1;
  895. if (l2aber) {
  896. /* Standard absolute error bound suffices. All sigma_i with */
  897. /* sigma_i < N*EPSLN*||A|| are flushed to zero. This is an */
  898. /* aggressive enforcement of lower numerical rank by introducing a */
  899. /* backward error of the order of N*EPSLN*||A||. */
  900. temp1 = sqrt((doublereal) (*n)) * epsln;
  901. i__1 = *n;
  902. for (p = 2; p <= i__1; ++p) {
  903. if ((d__2 = a[p + p * a_dim1], abs(d__2)) >= temp1 * (d__1 = a[
  904. a_dim1 + 1], abs(d__1))) {
  905. ++nr;
  906. } else {
  907. goto L3002;
  908. }
  909. /* L3001: */
  910. }
  911. L3002:
  912. ;
  913. } else if (l2rank) {
  914. /* .. similar to the above, only slightly more gentle (less aggressive). */
  915. /* Sudden drop on the diagonal of R1 is used as the criterion for */
  916. /* close-to-rank-deficient. */
  917. temp1 = sqrt(sfmin);
  918. i__1 = *n;
  919. for (p = 2; p <= i__1; ++p) {
  920. if ((d__2 = a[p + p * a_dim1], abs(d__2)) < epsln * (d__1 = a[p -
  921. 1 + (p - 1) * a_dim1], abs(d__1)) || (d__3 = a[p + p *
  922. a_dim1], abs(d__3)) < small || l2kill && (d__4 = a[p + p *
  923. a_dim1], abs(d__4)) < temp1) {
  924. goto L3402;
  925. }
  926. ++nr;
  927. /* L3401: */
  928. }
  929. L3402:
  930. ;
  931. } else {
  932. /* The goal is high relative accuracy. However, if the matrix */
  933. /* has high scaled condition number the relative accuracy is in */
  934. /* general not feasible. Later on, a condition number estimator */
  935. /* will be deployed to estimate the scaled condition number. */
  936. /* Here we just remove the underflowed part of the triangular */
  937. /* factor. This prevents the situation in which the code is */
  938. /* working hard to get the accuracy not warranted by the data. */
  939. temp1 = sqrt(sfmin);
  940. i__1 = *n;
  941. for (p = 2; p <= i__1; ++p) {
  942. if ((d__1 = a[p + p * a_dim1], abs(d__1)) < small || l2kill && (
  943. d__2 = a[p + p * a_dim1], abs(d__2)) < temp1) {
  944. goto L3302;
  945. }
  946. ++nr;
  947. /* L3301: */
  948. }
  949. L3302:
  950. ;
  951. }
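/* At this point NR holds the estimated numerical rank: the number of */
/* leading diagonal entries of R1 accepted by the criterion selected */
/* through JOBA (absolute drop relative to |R1(1,1)| for 'A', a sudden */
/* relative drop for 'R', or plain removal of underflowed entries otherwise). */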
  952. almort = FALSE_;
  953. if (nr == *n) {
  954. maxprj = 1.;
  955. i__1 = *n;
  956. for (p = 2; p <= i__1; ++p) {
  957. temp1 = (d__1 = a[p + p * a_dim1], abs(d__1)) / sva[iwork[p]];
  958. maxprj = min(maxprj,temp1);
  959. /* L3051: */
  960. }
  961. /* Computing 2nd power */
  962. d__1 = maxprj;
  963. if (d__1 * d__1 >= 1. - (doublereal) (*n) * epsln) {
  964. almort = TRUE_;
  965. }
  966. }
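/* ALMORT ("almost orthogonal") is set when every diagonal entry of R1 */
/* nearly exhausts the Euclidean norm of its pivoted column, i.e. the */
/* columns of A are numerically orthogonal; in that case parts of the */
/* second preconditioning below are skipped. */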
  967. sconda = -1.;
  968. condr1 = -1.;
  969. condr2 = -1.;
  970. if (errest) {
  971. if (*n == nr) {
  972. if (rsvec) {
  973. /* .. V is available as workspace */
  974. _starpu_dlacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
  975. i__1 = *n;
  976. for (p = 1; p <= i__1; ++p) {
  977. temp1 = sva[iwork[p]];
  978. d__1 = 1. / temp1;
  979. _starpu_dscal_(&p, &d__1, &v[p * v_dim1 + 1], &c__1);
  980. /* L3053: */
  981. }
  982. _starpu_dpocon_("U", n, &v[v_offset], ldv, &c_b35, &temp1, &work[*n +
  983. 1], &iwork[(*n << 1) + *m + 1], &ierr);
  984. } else if (lsvec) {
  985. /* .. U is available as workspace */
  986. _starpu_dlacpy_("U", n, n, &a[a_offset], lda, &u[u_offset], ldu);
  987. i__1 = *n;
  988. for (p = 1; p <= i__1; ++p) {
  989. temp1 = sva[iwork[p]];
  990. d__1 = 1. / temp1;
  991. _starpu_dscal_(&p, &d__1, &u[p * u_dim1 + 1], &c__1);
  992. /* L3054: */
  993. }
  994. _starpu_dpocon_("U", n, &u[u_offset], ldu, &c_b35, &temp1, &work[*n +
  995. 1], &iwork[(*n << 1) + *m + 1], &ierr);
  996. } else {
  997. _starpu_dlacpy_("U", n, n, &a[a_offset], lda, &work[*n + 1], n);
  998. i__1 = *n;
  999. for (p = 1; p <= i__1; ++p) {
  1000. temp1 = sva[iwork[p]];
  1001. d__1 = 1. / temp1;
  1002. _starpu_dscal_(&p, &d__1, &work[*n + (p - 1) * *n + 1], &c__1);
  1003. /* L3052: */
  1004. }
  1005. /* .. the columns of R are scaled to have unit Euclidean lengths. */
  1006. _starpu_dpocon_("U", n, &work[*n + 1], n, &c_b35, &temp1, &work[*n + *
  1007. n * *n + 1], &iwork[(*n << 1) + *m + 1], &ierr);
  1008. }
  1009. sconda = 1. / sqrt(temp1);
  1010. /* SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). */
  1011. /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
  1012. } else {
  1013. sconda = -1.;
  1014. }
  1015. }
  1016. l2pert = l2pert && (d__1 = a[a_dim1 + 1] / a[nr + nr * a_dim1], abs(d__1))
  1017. > sqrt(big1);
  1018. /* If there is no violent scaling, artificial perturbation is not needed. */
  1019. /* Phase 3: */
  1020. if (! (rsvec || lsvec)) {
  1021. /* Singular Values only */
  1022. /* .. transpose A(1:NR,1:N) */
  1023. /* Computing MIN */
  1024. i__2 = *n - 1;
  1025. i__1 = min(i__2,nr);
  1026. for (p = 1; p <= i__1; ++p) {
  1027. i__2 = *n - p;
  1028. _starpu_dcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
  1029. a_dim1], &c__1);
  1030. /* L1946: */
  1031. }
  1032. /* The following two DO-loops introduce small relative perturbation */
  1033. /* into the strict upper triangle of the lower triangular matrix. */
  1034. /* Small entries below the main diagonal are also changed. */
  1035. /* This modification is useful if the computing environment does not */
  1036. /* provide/allow FLUSH TO ZERO underflow, for it prevents many */
  1037. /* annoying denormalized numbers in case of strongly scaled matrices. */
  1038. /* The perturbation is structured so that it does not introduce any */
  1039. /* new perturbation of the singular values, and it does not destroy */
  1040. /* the job done by the preconditioner. */
  1041. /* The licence for this perturbation is in the variable L2PERT, which */
  1042. /* should be .FALSE. if FLUSH TO ZERO underflow is active. */
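/* Concretely, in column q the perturbation magnitude is EPSLN/N times */
/* |A(q,q)|: every entry of the strict upper triangle (which would */
/* otherwise be set to zero) is replaced by a value of that size with the */
/* sign of the entry it overwrites, and entries below the diagonal that */
/* are already smaller than that size are raised to it. */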
  1043. if (! almort) {
  1044. if (l2pert) {
  1045. /* XSC = DSQRT(SMALL) */
  1046. xsc = epsln / (doublereal) (*n);
  1047. i__1 = nr;
  1048. for (q = 1; q <= i__1; ++q) {
  1049. temp1 = xsc * (d__1 = a[q + q * a_dim1], abs(d__1));
  1050. i__2 = *n;
  1051. for (p = 1; p <= i__2; ++p) {
  1052. if (p > q && (d__1 = a[p + q * a_dim1], abs(d__1)) <=
  1053. temp1 || p < q) {
  1054. a[p + q * a_dim1] = d_sign(&temp1, &a[p + q *
  1055. a_dim1]);
  1056. }
  1057. /* L4949: */
  1058. }
  1059. /* L4947: */
  1060. }
  1061. } else {
  1062. i__1 = nr - 1;
  1063. i__2 = nr - 1;
  1064. _starpu_dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) +
  1065. 1], lda);
  1066. }
  1067. /* .. second preconditioning using the QR factorization */
  1068. i__1 = *lwork - *n;
  1069. _starpu_dgeqrf_(n, &nr, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
  1070. &ierr);
  1071. /* .. and transpose upper to lower triangular */
  1072. i__1 = nr - 1;
  1073. for (p = 1; p <= i__1; ++p) {
  1074. i__2 = nr - p;
  1075. _starpu_dcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
  1076. a_dim1], &c__1);
  1077. /* L1948: */
  1078. }
  1079. }
  1080. /* Row-cyclic Jacobi SVD algorithm with column pivoting */
  1081. /* .. again some perturbation (a "background noise") is added */
  1082. /* to drown denormals */
  1083. if (l2pert) {
  1084. /* XSC = DSQRT(SMALL) */
  1085. xsc = epsln / (doublereal) (*n);
  1086. i__1 = nr;
  1087. for (q = 1; q <= i__1; ++q) {
  1088. temp1 = xsc * (d__1 = a[q + q * a_dim1], abs(d__1));
  1089. i__2 = nr;
  1090. for (p = 1; p <= i__2; ++p) {
  1091. if (p > q && (d__1 = a[p + q * a_dim1], abs(d__1)) <=
  1092. temp1 || p < q) {
  1093. a[p + q * a_dim1] = d_sign(&temp1, &a[p + q * a_dim1])
  1094. ;
  1095. }
  1096. /* L1949: */
  1097. }
  1098. /* L1947: */
  1099. }
  1100. } else {
  1101. i__1 = nr - 1;
  1102. i__2 = nr - 1;
  1103. _starpu_dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) + 1],
  1104. lda);
  1105. }
  1106. /* .. and one-sided Jacobi rotations are started on a lower */
1107. /* triangular matrix (plus a perturbation, which is ignored in */
1108. /* the part that destroys the triangular form (confusing?!)) */
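/* .. with "NoU"/"NoV" DGESVJ computes the singular values only; on exit */
/* WORK(1) holds the scaling factor of the computed SVA and WORK(2) the */
/* number of nonzero (numerical-rank) singular values, picked up below as */
/* SCALEM and NUMRANK. */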
  1109. _starpu_dgesvj_("L", "NoU", "NoV", &nr, &nr, &a[a_offset], lda, &sva[1], n, &
  1110. v[v_offset], ldv, &work[1], lwork, info);
  1111. scalem = work[1];
  1112. numrank = i_dnnt(&work[2]);
  1113. } else if (rsvec && ! lsvec) {
  1114. /* -> Singular Values and Right Singular Vectors <- */
  1115. if (almort) {
  1116. /* .. in this case NR equals N */
  1117. i__1 = nr;
  1118. for (p = 1; p <= i__1; ++p) {
  1119. i__2 = *n - p + 1;
  1120. _starpu_dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
  1121. c__1);
  1122. /* L1998: */
  1123. }
  1124. i__1 = nr - 1;
  1125. i__2 = nr - 1;
  1126. _starpu_dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
  1127. 1], ldv);
  1128. _starpu_dgesvj_("L", "U", "N", n, &nr, &v[v_offset], ldv, &sva[1], &nr, &
  1129. a[a_offset], lda, &work[1], lwork, info);
  1130. scalem = work[1];
  1131. numrank = i_dnnt(&work[2]);
  1132. } else {
1133. /* .. two more QR factorizations (one QRF is not enough, two require the */
1134. /* accumulated product of Jacobi rotations, three are perfect) */
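/* .. roughly: the strict lower triangle of A (Householder vectors of the */
/* first QRF) is cleared, R is LQ-factored (R = L * Q2), the NR x NR factor */
/* is QR-factored once more, and DGESVJ runs on the resulting triangular */
/* matrix; the right singular vectors are then recovered by applying Q2^t */
/* (DORMLQ below) and undoing the column permutation of the first QRF. */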
  1135. i__1 = nr - 1;
  1136. i__2 = nr - 1;
  1137. _starpu_dlaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &a[a_dim1 + 2],
  1138. lda);
  1139. i__1 = *lwork - *n;
  1140. _starpu_dgelqf_(&nr, n, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
  1141. &ierr);
  1142. _starpu_dlacpy_("Lower", &nr, &nr, &a[a_offset], lda, &v[v_offset], ldv);
  1143. i__1 = nr - 1;
  1144. i__2 = nr - 1;
  1145. _starpu_dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
  1146. 1], ldv);
  1147. i__1 = *lwork - (*n << 1);
  1148. _starpu_dgeqrf_(&nr, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n <<
  1149. 1) + 1], &i__1, &ierr);
  1150. i__1 = nr;
  1151. for (p = 1; p <= i__1; ++p) {
  1152. i__2 = nr - p + 1;
  1153. _starpu_dcopy_(&i__2, &v[p + p * v_dim1], ldv, &v[p + p * v_dim1], &
  1154. c__1);
  1155. /* L8998: */
  1156. }
  1157. i__1 = nr - 1;
  1158. i__2 = nr - 1;
  1159. _starpu_dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
  1160. 1], ldv);
  1161. _starpu_dgesvj_("Lower", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[1], &
  1162. nr, &u[u_offset], ldu, &work[*n + 1], lwork, info);
  1163. scalem = work[*n + 1];
  1164. numrank = i_dnnt(&work[*n + 2]);
  1165. if (nr < *n) {
  1166. i__1 = *n - nr;
  1167. _starpu_dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
  1168. ldv);
  1169. i__1 = *n - nr;
  1170. _starpu_dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
  1171. + 1], ldv);
  1172. i__1 = *n - nr;
  1173. i__2 = *n - nr;
  1174. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
  1175. 1) * v_dim1], ldv);
  1176. }
  1177. i__1 = *lwork - *n;
  1178. _starpu_dormlq_("Left", "Transpose", n, n, &nr, &a[a_offset], lda, &work[
  1179. 1], &v[v_offset], ldv, &work[*n + 1], &i__1, &ierr);
  1180. }
  1181. i__1 = *n;
  1182. for (p = 1; p <= i__1; ++p) {
  1183. _starpu_dcopy_(n, &v[p + v_dim1], ldv, &a[iwork[p] + a_dim1], lda);
  1184. /* L8991: */
  1185. }
  1186. _starpu_dlacpy_("All", n, n, &a[a_offset], lda, &v[v_offset], ldv);
  1187. if (transp) {
  1188. _starpu_dlacpy_("All", n, n, &v[v_offset], ldv, &u[u_offset], ldu);
  1189. }
  1190. } else if (lsvec && ! rsvec) {
  1191. /* -#- Singular Values and Left Singular Vectors -#- */
  1192. /* .. second preconditioning step to avoid need to accumulate */
  1193. /* Jacobi rotations in the Jacobi iterations. */
  1194. i__1 = nr;
  1195. for (p = 1; p <= i__1; ++p) {
  1196. i__2 = *n - p + 1;
  1197. _starpu_dcopy_(&i__2, &a[p + p * a_dim1], lda, &u[p + p * u_dim1], &c__1);
  1198. /* L1965: */
  1199. }
  1200. i__1 = nr - 1;
  1201. i__2 = nr - 1;
  1202. _starpu_dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
  1203. ldu);
  1204. i__1 = *lwork - (*n << 1);
  1205. _starpu_dgeqrf_(n, &nr, &u[u_offset], ldu, &work[*n + 1], &work[(*n << 1) + 1]
  1206. , &i__1, &ierr);
  1207. i__1 = nr - 1;
  1208. for (p = 1; p <= i__1; ++p) {
  1209. i__2 = nr - p;
  1210. _starpu_dcopy_(&i__2, &u[p + (p + 1) * u_dim1], ldu, &u[p + 1 + p *
  1211. u_dim1], &c__1);
  1212. /* L1967: */
  1213. }
  1214. i__1 = nr - 1;
  1215. i__2 = nr - 1;
  1216. _starpu_dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
  1217. ldu);
  1218. i__1 = *lwork - *n;
  1219. _starpu_dgesvj_("Lower", "U", "N", &nr, &nr, &u[u_offset], ldu, &sva[1], &nr,
  1220. &a[a_offset], lda, &work[*n + 1], &i__1, info);
  1221. scalem = work[*n + 1];
  1222. numrank = i_dnnt(&work[*n + 2]);
  1223. if (nr < *m) {
  1224. i__1 = *m - nr;
  1225. _starpu_dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 + u_dim1], ldu);
  1226. if (nr < n1) {
  1227. i__1 = n1 - nr;
  1228. _starpu_dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) * u_dim1
  1229. + 1], ldu);
  1230. i__1 = *m - nr;
  1231. i__2 = n1 - nr;
  1232. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (nr +
  1233. 1) * u_dim1], ldu);
  1234. }
  1235. }
  1236. i__1 = *lwork - *n;
  1237. _starpu_dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &u[
  1238. u_offset], ldu, &work[*n + 1], &i__1, &ierr);
  1239. if (rowpiv) {
  1240. i__1 = *m - 1;
  1241. _starpu_dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1) +
  1242. 1], &c_n1);
  1243. }
  1244. i__1 = n1;
  1245. for (p = 1; p <= i__1; ++p) {
  1246. xsc = 1. / _starpu_dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  1247. _starpu_dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  1248. /* L1974: */
  1249. }
  1250. if (transp) {
  1251. _starpu_dlacpy_("All", n, n, &u[u_offset], ldu, &v[v_offset], ldv);
  1252. }
  1253. } else {
  1254. /* -#- Full SVD -#- */
  1255. if (! jracc) {
  1256. if (! almort) {
  1257. /* Second Preconditioning Step (QRF [with pivoting]) */
  1258. /* Note that the composition of TRANSPOSE, QRF and TRANSPOSE is */
  1259. /* equivalent to an LQF CALL. Since in many libraries the QRF */
  1260. /* seems to be better optimized than the LQF, we do explicit */
  1261. /* transpose and use the QRF. This is subject to changes in an */
  1262. /* optimized implementation of DGEJSV. */
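/* .. the loop below forms the explicit transpose: V(p:N,p) := A(p,p:N), */
/* i.e. the upper trapezoidal R1 is copied row by row into the lower */
/* triangle of V, which is what the subsequent QRF operates on. */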
  1263. i__1 = nr;
  1264. for (p = 1; p <= i__1; ++p) {
  1265. i__2 = *n - p + 1;
  1266. _starpu_dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1],
  1267. &c__1);
  1268. /* L1968: */
  1269. }
  1270. /* .. the following two loops perturb small entries to avoid */
  1271. /* denormals in the second QR factorization, where they are */
  1272. /* as good as zeros. This is done to avoid painfully slow */
  1273. /* computation with denormals. The relative size of the perturbation */
  1274. /* is a parameter that can be changed by the implementer. */
  1275. /* This perturbation device will be obsolete on machines with */
  1276. /* properly implemented arithmetic. */
  1277. /* To switch it off, set L2PERT=.FALSE. To remove it from the */
  1278. /* code, remove the action under L2PERT=.TRUE., leave the ELSE part. */
  1279. /* The following two loops should be blocked and fused with the */
  1280. /* transposed copy above. */
  1281. if (l2pert) {
  1282. xsc = sqrt(small);
  1283. i__1 = nr;
  1284. for (q = 1; q <= i__1; ++q) {
  1285. temp1 = xsc * (d__1 = v[q + q * v_dim1], abs(d__1));
  1286. i__2 = *n;
  1287. for (p = 1; p <= i__2; ++p) {
  1288. if (p > q && (d__1 = v[p + q * v_dim1], abs(d__1))
  1289. <= temp1 || p < q) {
  1290. v[p + q * v_dim1] = d_sign(&temp1, &v[p + q *
  1291. v_dim1]);
  1292. }
  1293. if (p < q) {
  1294. v[p + q * v_dim1] = -v[p + q * v_dim1];
  1295. }
  1296. /* L2968: */
  1297. }
  1298. /* L2969: */
  1299. }
  1300. } else {
  1301. i__1 = nr - 1;
  1302. i__2 = nr - 1;
  1303. _starpu_dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
  1304. 1) + 1], ldv);
  1305. }
  1306. /* Estimate the row scaled condition number of R1 */
  1307. /* (If R1 is rectangular, N > NR, then the condition number */
  1308. /* of the leading NR x NR submatrix is estimated.) */
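/* .. the lower triangle of V (= R1^t) is copied to workspace, its columns */
/* (i.e. the rows of R1) are scaled to unit Euclidean length, and DPOCON is */
/* called with ANORM equal to one; TEMP1 then estimates the reciprocal of */
/* ||C^(-1)||_1 for the corresponding Gram matrix, and CONDR1 = 1/DSQRT(TEMP1). */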
  1309. _starpu_dlacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1]
  1310. , &nr);
  1311. i__1 = nr;
  1312. for (p = 1; p <= i__1; ++p) {
  1313. i__2 = nr - p + 1;
  1314. temp1 = _starpu_dnrm2_(&i__2, &work[(*n << 1) + (p - 1) * nr + p],
  1315. &c__1);
  1316. i__2 = nr - p + 1;
  1317. d__1 = 1. / temp1;
  1318. _starpu_dscal_(&i__2, &d__1, &work[(*n << 1) + (p - 1) * nr + p],
  1319. &c__1);
  1320. /* L3950: */
  1321. }
  1322. _starpu_dpocon_("Lower", &nr, &work[(*n << 1) + 1], &nr, &c_b35, &
  1323. temp1, &work[(*n << 1) + nr * nr + 1], &iwork[*m + (*
  1324. n << 1) + 1], &ierr);
  1325. condr1 = 1. / sqrt(temp1);
1326. /* .. here we would need a second opinion on the condition number, */
1327. /* .. so assume the worst-case scenario */
  1328. /* R1 is OK for inverse <=> CONDR1 .LT. DBLE(N) */
  1329. /* more conservative <=> CONDR1 .LT. DSQRT(DBLE(N)) */
  1330. cond_ok__ = sqrt((doublereal) nr);
  1331. /* [TP] COND_OK is a tuning parameter. */
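/* .. branch structure from here on: if CONDR1 < COND_OK, an unpivoted QRF */
/* of R1^t suffices (R1^t = Q2 * R2); otherwise a column-pivoted QRF */
/* (DGEQP3) is used and R2 is further LQ-factored (R2 = L3 * Q3), with */
/* CONDR2 deciding below whether triangular solves are safe or whether the */
/* last-resort path with explicitly accumulated rotations must be taken. */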
  1332. if (condr1 < cond_ok__) {
  1333. /* .. the second QRF without pivoting. Note: in an optimized */
  1334. /* implementation, this QRF should be implemented as the QRF */
  1335. /* of a lower triangular matrix. */
  1336. /* R1^t = Q2 * R2 */
  1337. i__1 = *lwork - (*n << 1);
  1338. _starpu_dgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*
  1339. n << 1) + 1], &i__1, &ierr);
  1340. if (l2pert) {
  1341. xsc = sqrt(small) / epsln;
  1342. i__1 = nr;
  1343. for (p = 2; p <= i__1; ++p) {
  1344. i__2 = p - 1;
  1345. for (q = 1; q <= i__2; ++q) {
  1346. /* Computing MIN */
  1347. d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
  1348. d__4 = (d__2 = v[q + q * v_dim1], abs(
  1349. d__2));
  1350. temp1 = xsc * min(d__3,d__4);
  1351. if ((d__1 = v[q + p * v_dim1], abs(d__1)) <=
  1352. temp1) {
  1353. v[q + p * v_dim1] = d_sign(&temp1, &v[q +
  1354. p * v_dim1]);
  1355. }
  1356. /* L3958: */
  1357. }
  1358. /* L3959: */
  1359. }
  1360. }
  1361. if (nr != *n) {
  1362. _starpu_dlacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n <<
  1363. 1) + 1], n);
  1364. }
  1365. /* .. save ... */
  1366. /* .. this transposed copy should be better than naive */
  1367. i__1 = nr - 1;
  1368. for (p = 1; p <= i__1; ++p) {
  1369. i__2 = nr - p;
  1370. _starpu_dcopy_(&i__2, &v[p + (p + 1) * v_dim1], ldv, &v[p + 1
  1371. + p * v_dim1], &c__1);
  1372. /* L1969: */
  1373. }
  1374. condr2 = condr1;
  1375. } else {
  1376. /* .. ill-conditioned case: second QRF with pivoting */
1377. /* Note that windowed pivoting would be equally good */
  1378. /* numerically, and more run-time efficient. So, in */
  1379. /* an optimal implementation, the next call to DGEQP3 */
1380. /* should be replaced with, e.g., CALL SGEQPX (ACM TOMS #782) */
  1381. /* with properly (carefully) chosen parameters. */
  1382. /* R1^t * P2 = Q2 * R2 */
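/* .. the pivot array IWORK(N+1:N+NR) is zeroed first so that DGEQP3 treats */
/* every column as free; a nonzero entry would mark the corresponding column */
/* as fixed (moved to a leading position and excluded from pivoting). */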
  1383. i__1 = nr;
  1384. for (p = 1; p <= i__1; ++p) {
  1385. iwork[*n + p] = 0;
  1386. /* L3003: */
  1387. }
  1388. i__1 = *lwork - (*n << 1);
  1389. _starpu_dgeqp3_(n, &nr, &v[v_offset], ldv, &iwork[*n + 1], &work[*
  1390. n + 1], &work[(*n << 1) + 1], &i__1, &ierr);
  1391. /* * CALL DGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1), */
  1392. /* * & LWORK-2*N, IERR ) */
  1393. if (l2pert) {
  1394. xsc = sqrt(small);
  1395. i__1 = nr;
  1396. for (p = 2; p <= i__1; ++p) {
  1397. i__2 = p - 1;
  1398. for (q = 1; q <= i__2; ++q) {
  1399. /* Computing MIN */
  1400. d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
  1401. d__4 = (d__2 = v[q + q * v_dim1], abs(
  1402. d__2));
  1403. temp1 = xsc * min(d__3,d__4);
  1404. if ((d__1 = v[q + p * v_dim1], abs(d__1)) <=
  1405. temp1) {
  1406. v[q + p * v_dim1] = d_sign(&temp1, &v[q +
  1407. p * v_dim1]);
  1408. }
  1409. /* L3968: */
  1410. }
  1411. /* L3969: */
  1412. }
  1413. }
  1414. _starpu_dlacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n << 1) +
  1415. 1], n);
  1416. if (l2pert) {
  1417. xsc = sqrt(small);
  1418. i__1 = nr;
  1419. for (p = 2; p <= i__1; ++p) {
  1420. i__2 = p - 1;
  1421. for (q = 1; q <= i__2; ++q) {
  1422. /* Computing MIN */
  1423. d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
  1424. d__4 = (d__2 = v[q + q * v_dim1], abs(
  1425. d__2));
  1426. temp1 = xsc * min(d__3,d__4);
  1427. v[p + q * v_dim1] = -d_sign(&temp1, &v[q + p *
  1428. v_dim1]);
  1429. /* L8971: */
  1430. }
  1431. /* L8970: */
  1432. }
  1433. } else {
  1434. i__1 = nr - 1;
  1435. i__2 = nr - 1;
  1436. _starpu_dlaset_("L", &i__1, &i__2, &c_b34, &c_b34, &v[v_dim1
  1437. + 2], ldv);
  1438. }
  1439. /* Now, compute R2 = L3 * Q3, the LQ factorization. */
  1440. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1441. _starpu_dgelqf_(&nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + *n
  1442. * nr + 1], &work[(*n << 1) + *n * nr + nr + 1], &
  1443. i__1, &ierr);
  1444. /* .. and estimate the condition number */
  1445. _starpu_dlacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1)
  1446. + *n * nr + nr + 1], &nr);
  1447. i__1 = nr;
  1448. for (p = 1; p <= i__1; ++p) {
  1449. temp1 = _starpu_dnrm2_(&p, &work[(*n << 1) + *n * nr + nr + p]
  1450. , &nr);
  1451. d__1 = 1. / temp1;
  1452. _starpu_dscal_(&p, &d__1, &work[(*n << 1) + *n * nr + nr + p],
  1453. &nr);
  1454. /* L4950: */
  1455. }
  1456. _starpu_dpocon_("L", &nr, &work[(*n << 1) + *n * nr + nr + 1], &
  1457. nr, &c_b35, &temp1, &work[(*n << 1) + *n * nr +
  1458. nr + nr * nr + 1], &iwork[*m + (*n << 1) + 1], &
  1459. ierr);
  1460. condr2 = 1. / sqrt(temp1);
  1461. if (condr2 >= cond_ok__) {
  1462. /* .. save the Householder vectors used for Q3 */
1463. /* (this overwrites the copy of R2, as it will not be */
1464. /* needed in this branch, but it does not overwrite the */
1465. /* Householder vectors of Q2). */
  1466. _starpu_dlacpy_("U", &nr, &nr, &v[v_offset], ldv, &work[(*n <<
  1467. 1) + 1], n);
  1468. /* .. and the rest of the information on Q3 is in */
  1469. /* WORK(2*N+N*NR+1:2*N+N*NR+N) */
  1470. }
  1471. }
  1472. if (l2pert) {
  1473. xsc = sqrt(small);
  1474. i__1 = nr;
  1475. for (q = 2; q <= i__1; ++q) {
  1476. temp1 = xsc * v[q + q * v_dim1];
  1477. i__2 = q - 1;
  1478. for (p = 1; p <= i__2; ++p) {
  1479. /* V(p,q) = - DSIGN( TEMP1, V(q,p) ) */
  1480. v[p + q * v_dim1] = -d_sign(&temp1, &v[p + q *
  1481. v_dim1]);
  1482. /* L4969: */
  1483. }
  1484. /* L4968: */
  1485. }
  1486. } else {
  1487. i__1 = nr - 1;
  1488. i__2 = nr - 1;
  1489. _starpu_dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
  1490. 1) + 1], ldv);
  1491. }
  1492. /* Second preconditioning finished; continue with Jacobi SVD */
1493. /* The input matrix is lower triangular. */
  1494. /* Recover the right singular vectors as solution of a well */
  1495. /* conditioned triangular matrix equation. */
  1496. if (condr1 < cond_ok__) {
  1497. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1498. _starpu_dgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
  1499. 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
  1500. nr + nr + 1], &i__1, info);
  1501. scalem = work[(*n << 1) + *n * nr + nr + 1];
  1502. numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
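/* .. after DGESVJ (JOBU = 'U'), V holds the left singular vectors of the */
/* preconditioned triangular factor and SVA its singular values; the loop */
/* below saves those vectors in U and rescales the columns of V by SVA(p), */
/* so that V becomes the right-hand side of the triangular solve that follows. */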
  1503. i__1 = nr;
  1504. for (p = 1; p <= i__1; ++p) {
  1505. _starpu_dcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
  1506. + 1], &c__1);
  1507. _starpu_dscal_(&nr, &sva[p], &v[p * v_dim1 + 1], &c__1);
  1508. /* L3970: */
  1509. }
  1510. /* .. pick the right matrix equation and solve it */
  1511. if (nr == *n) {
  1512. /* :)) .. best case, R1 is inverted. The solution of this matrix */
  1513. /* equation is Q2*V2 = the product of the Jacobi rotations */
  1514. /* used in DGESVJ, premultiplied with the orthogonal matrix */
  1515. /* from the second QR factorization. */
  1516. _starpu_dtrsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &a[
  1517. a_offset], lda, &v[v_offset], ldv);
  1518. } else {
  1519. /* .. R1 is well conditioned, but non-square. Transpose(R2) */
  1520. /* is inverted to get the product of the Jacobi rotations */
  1521. /* used in DGESVJ. The Q-factor from the second QR */
  1522. /* factorization is then built in explicitly. */
  1523. _starpu_dtrsm_("L", "U", "T", "N", &nr, &nr, &c_b35, &work[(*
  1524. n << 1) + 1], n, &v[v_offset], ldv);
  1525. if (nr < *n) {
  1526. i__1 = *n - nr;
  1527. _starpu_dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr +
  1528. 1 + v_dim1], ldv);
  1529. i__1 = *n - nr;
  1530. _starpu_dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr +
  1531. 1) * v_dim1 + 1], ldv);
  1532. i__1 = *n - nr;
  1533. i__2 = *n - nr;
  1534. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr
  1535. + 1 + (nr + 1) * v_dim1], ldv);
  1536. }
  1537. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1538. _starpu_dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n,
  1539. &work[*n + 1], &v[v_offset], ldv, &work[(*n <<
  1540. 1) + *n * nr + nr + 1], &i__1, &ierr);
  1541. }
  1542. } else if (condr2 < cond_ok__) {
  1543. /* :) .. the input matrix A is very likely a relative of */
  1544. /* the Kahan matrix :) */
  1545. /* The matrix R2 is inverted. The solution of the matrix equation */
1546. /* is Q3^T*V3 = the product of the Jacobi rotations (applied to */
  1547. /* the lower triangular L3 from the LQ factorization of */
  1548. /* R2=L3*Q3), pre-multiplied with the transposed Q3. */
  1549. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1550. _starpu_dgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
  1551. 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
  1552. nr + nr + 1], &i__1, info);
  1553. scalem = work[(*n << 1) + *n * nr + nr + 1];
  1554. numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
  1555. i__1 = nr;
  1556. for (p = 1; p <= i__1; ++p) {
  1557. _starpu_dcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
  1558. + 1], &c__1);
  1559. _starpu_dscal_(&nr, &sva[p], &u[p * u_dim1 + 1], &c__1);
  1560. /* L3870: */
  1561. }
  1562. _starpu_dtrsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &work[(*n <<
  1563. 1) + 1], n, &u[u_offset], ldu);
  1564. /* .. apply the permutation from the second QR factorization */
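/* .. IWORK(N+1:N+NR) holds the DGEQP3 pivots; the two loops below permute */
/* the rows of U accordingly, using WORK(2*N+N*NR+NR+1:...) as scratch. */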
  1565. i__1 = nr;
  1566. for (q = 1; q <= i__1; ++q) {
  1567. i__2 = nr;
  1568. for (p = 1; p <= i__2; ++p) {
  1569. work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
  1570. u[p + q * u_dim1];
  1571. /* L872: */
  1572. }
  1573. i__2 = nr;
  1574. for (p = 1; p <= i__2; ++p) {
  1575. u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
  1576. + p];
  1577. /* L874: */
  1578. }
  1579. /* L873: */
  1580. }
  1581. if (nr < *n) {
  1582. i__1 = *n - nr;
  1583. _starpu_dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
  1584. v_dim1], ldv);
  1585. i__1 = *n - nr;
  1586. _starpu_dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
  1587. v_dim1 + 1], ldv);
  1588. i__1 = *n - nr;
  1589. i__2 = *n - nr;
  1590. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
  1591. + (nr + 1) * v_dim1], ldv);
  1592. }
  1593. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1594. _starpu_dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
  1595. work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
  1596. + *n * nr + nr + 1], &i__1, &ierr);
  1597. } else {
  1598. /* Last line of defense. */
  1599. /* #:( This is a rather pathological case: no scaled condition */
1600. /* improvement after two pivoted QR factorizations. Another */
1601. /* possibility is that the rank-revealing QR factorization */
  1602. /* or the condition estimator has failed, or the COND_OK */
  1603. /* is set very close to ONE (which is unnecessary). Normally, */
  1604. /* this branch should never be executed, but in rare cases of */
  1605. /* failure of the RRQR or condition estimator, the last line of */
  1606. /* defense ensures that DGEJSV completes the task. */
  1607. /* Compute the full SVD of L3 using DGESVJ with explicit */
  1608. /* accumulation of Jacobi rotations. */
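/* .. here DGESVJ is called with JOBV = 'V', so the product of the Jacobi */
/* rotations is accumulated explicitly (into U); afterwards the factors of */
/* the earlier QR/LQ steps are applied back via DORMQR and DORMLQ, and the */
/* DGEQP3 row permutation of U is undone. */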
  1609. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1610. _starpu_dgesvj_("L", "U", "V", &nr, &nr, &v[v_offset], ldv, &sva[
  1611. 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
  1612. nr + nr + 1], &i__1, info);
  1613. scalem = work[(*n << 1) + *n * nr + nr + 1];
  1614. numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
  1615. if (nr < *n) {
  1616. i__1 = *n - nr;
  1617. _starpu_dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
  1618. v_dim1], ldv);
  1619. i__1 = *n - nr;
  1620. _starpu_dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
  1621. v_dim1 + 1], ldv);
  1622. i__1 = *n - nr;
  1623. i__2 = *n - nr;
  1624. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
  1625. + (nr + 1) * v_dim1], ldv);
  1626. }
  1627. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1628. _starpu_dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
  1629. work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
  1630. + *n * nr + nr + 1], &i__1, &ierr);
  1631. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1632. _starpu_dormlq_("L", "T", &nr, &nr, &nr, &work[(*n << 1) + 1], n,
  1633. &work[(*n << 1) + *n * nr + 1], &u[u_offset], ldu,
  1634. &work[(*n << 1) + *n * nr + nr + 1], &i__1, &
  1635. ierr);
  1636. i__1 = nr;
  1637. for (q = 1; q <= i__1; ++q) {
  1638. i__2 = nr;
  1639. for (p = 1; p <= i__2; ++p) {
  1640. work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
  1641. u[p + q * u_dim1];
  1642. /* L772: */
  1643. }
  1644. i__2 = nr;
  1645. for (p = 1; p <= i__2; ++p) {
  1646. u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
  1647. + p];
  1648. /* L774: */
  1649. }
  1650. /* L773: */
  1651. }
  1652. }
  1653. /* Permute the rows of V using the (column) permutation from the */
  1654. /* first QRF. Also, scale the columns to make them unit in */
  1655. /* Euclidean norm. This applies to all cases. */
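/* .. IWORK(1:N) is the column permutation of the first, pivoted QRF; moving */
/* row p of V to row IWORK(p) turns the right singular vectors of A*P back */
/* into those of A. The rescaling is only applied when a column norm has */
/* drifted from one by more than about DSQRT(N)*EPSLN. */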
  1656. temp1 = sqrt((doublereal) (*n)) * epsln;
  1657. i__1 = *n;
  1658. for (q = 1; q <= i__1; ++q) {
  1659. i__2 = *n;
  1660. for (p = 1; p <= i__2; ++p) {
  1661. work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
  1662. v_dim1];
  1663. /* L972: */
  1664. }
  1665. i__2 = *n;
  1666. for (p = 1; p <= i__2; ++p) {
  1667. v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p]
  1668. ;
  1669. /* L973: */
  1670. }
  1671. xsc = 1. / _starpu_dnrm2_(n, &v[q * v_dim1 + 1], &c__1);
  1672. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  1673. _starpu_dscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
  1674. }
  1675. /* L1972: */
  1676. }
  1677. /* At this moment, V contains the right singular vectors of A. */
  1678. /* Next, assemble the left singular vector matrix U (M x N). */
  1679. if (nr < *m) {
  1680. i__1 = *m - nr;
  1681. _starpu_dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 +
  1682. u_dim1], ldu);
  1683. if (nr < n1) {
  1684. i__1 = n1 - nr;
  1685. _starpu_dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) *
  1686. u_dim1 + 1], ldu);
  1687. i__1 = *m - nr;
  1688. i__2 = n1 - nr;
  1689. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1
  1690. + (nr + 1) * u_dim1], ldu);
  1691. }
  1692. }
  1693. /* The Q matrix from the first QRF is built into the left singular */
  1694. /* matrix U. This applies to all cases. */
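/* .. the Householder vectors of that first QRF are still stored below the */
/* diagonal of A, with the scalar factors in WORK(1:N), which is what the */
/* DORMQR call here consumes. */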
  1695. i__1 = *lwork - *n;
  1696. _starpu_dormqr_("Left", "No_Tr", m, &n1, n, &a[a_offset], lda, &work[
  1697. 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
  1698. /* The columns of U are normalized. The cost is O(M*N) flops. */
  1699. temp1 = sqrt((doublereal) (*m)) * epsln;
  1700. i__1 = nr;
  1701. for (p = 1; p <= i__1; ++p) {
  1702. xsc = 1. / _starpu_dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  1703. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  1704. _starpu_dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  1705. }
  1706. /* L1973: */
  1707. }
  1708. /* If the initial QRF is computed with row pivoting, the left */
  1709. /* singular vectors must be adjusted. */
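/* .. DLASWP with increment -1 applies the recorded row interchanges */
/* IWORK(2*N+1:2*N+M-1) in reverse order, which restores the original row */
/* ordering of A in the columns of U. */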
  1710. if (rowpiv) {
  1711. i__1 = *m - 1;
  1712. _starpu_dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
  1713. << 1) + 1], &c_n1);
  1714. }
  1715. } else {
  1716. /* .. the initial matrix A has almost orthogonal columns and */
  1717. /* the second QRF is not needed */
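/* .. roughly: R is copied to workspace, DGESVJ computes its SVD in place */
/* (the left vectors overwrite the copy), a single triangular solve with R */
/* (DTRSM below) then yields the right singular vectors, whose rows are */
/* finally permuted back according to IWORK(1:N). */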
  1718. _starpu_dlacpy_("Upper", n, n, &a[a_offset], lda, &work[*n + 1], n);
  1719. if (l2pert) {
  1720. xsc = sqrt(small);
  1721. i__1 = *n;
  1722. for (p = 2; p <= i__1; ++p) {
  1723. temp1 = xsc * work[*n + (p - 1) * *n + p];
  1724. i__2 = p - 1;
  1725. for (q = 1; q <= i__2; ++q) {
  1726. work[*n + (q - 1) * *n + p] = -d_sign(&temp1, &
  1727. work[*n + (p - 1) * *n + q]);
  1728. /* L5971: */
  1729. }
  1730. /* L5970: */
  1731. }
  1732. } else {
  1733. i__1 = *n - 1;
  1734. i__2 = *n - 1;
  1735. _starpu_dlaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &work[*n +
  1736. 2], n);
  1737. }
  1738. i__1 = *lwork - *n - *n * *n;
  1739. _starpu_dgesvj_("Upper", "U", "N", n, n, &work[*n + 1], n, &sva[1], n,
  1740. &u[u_offset], ldu, &work[*n + *n * *n + 1], &i__1,
  1741. info);
  1742. scalem = work[*n + *n * *n + 1];
  1743. numrank = i_dnnt(&work[*n + *n * *n + 2]);
  1744. i__1 = *n;
  1745. for (p = 1; p <= i__1; ++p) {
  1746. _starpu_dcopy_(n, &work[*n + (p - 1) * *n + 1], &c__1, &u[p *
  1747. u_dim1 + 1], &c__1);
  1748. _starpu_dscal_(n, &sva[p], &work[*n + (p - 1) * *n + 1], &c__1);
  1749. /* L6970: */
  1750. }
  1751. _starpu_dtrsm_("Left", "Upper", "NoTrans", "No UD", n, n, &c_b35, &a[
  1752. a_offset], lda, &work[*n + 1], n);
  1753. i__1 = *n;
  1754. for (p = 1; p <= i__1; ++p) {
  1755. _starpu_dcopy_(n, &work[*n + p], n, &v[iwork[p] + v_dim1], ldv);
  1756. /* L6972: */
  1757. }
  1758. temp1 = sqrt((doublereal) (*n)) * epsln;
  1759. i__1 = *n;
  1760. for (p = 1; p <= i__1; ++p) {
  1761. xsc = 1. / _starpu_dnrm2_(n, &v[p * v_dim1 + 1], &c__1);
  1762. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  1763. _starpu_dscal_(n, &xsc, &v[p * v_dim1 + 1], &c__1);
  1764. }
  1765. /* L6971: */
  1766. }
  1767. /* Assemble the left singular vector matrix U (M x N). */
  1768. if (*n < *m) {
  1769. i__1 = *m - *n;
  1770. _starpu_dlaset_("A", &i__1, n, &c_b34, &c_b34, &u[nr + 1 + u_dim1]
  1771. , ldu);
  1772. if (*n < n1) {
  1773. i__1 = n1 - *n;
  1774. _starpu_dlaset_("A", n, &i__1, &c_b34, &c_b34, &u[(*n + 1) *
  1775. u_dim1 + 1], ldu);
  1776. i__1 = *m - *n;
  1777. i__2 = n1 - *n;
  1778. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1
  1779. + (*n + 1) * u_dim1], ldu);
  1780. }
  1781. }
  1782. i__1 = *lwork - *n;
  1783. _starpu_dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[
  1784. 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
  1785. temp1 = sqrt((doublereal) (*m)) * epsln;
  1786. i__1 = n1;
  1787. for (p = 1; p <= i__1; ++p) {
  1788. xsc = 1. / _starpu_dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  1789. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  1790. _starpu_dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  1791. }
  1792. /* L6973: */
  1793. }
  1794. if (rowpiv) {
  1795. i__1 = *m - 1;
  1796. _starpu_dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
  1797. << 1) + 1], &c_n1);
  1798. }
  1799. }
  1800. /* end of the >> almost orthogonal case << in the full SVD */
  1801. } else {
  1802. /* This branch deploys a preconditioned Jacobi SVD with explicitly */
  1803. /* accumulated rotations. It is included as optional, mainly for */
1804. /* experimental purposes. It does perform well, and can also be used. */
  1805. /* In this implementation, this branch will be automatically activated */
  1806. /* if the condition number sigma_max(A) / sigma_min(A) is predicted */
  1807. /* to be greater than the overflow threshold. This is because the */
  1808. /* a posteriori computation of the singular vectors assumes robust */
  1809. /* implementation of BLAS and some LAPACK procedures, capable of working */
1810. /* in the presence of extreme values. Since that is not always the case, ... */
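/* .. in this branch DGESVJ is eventually called with JOBV = 'V' (see the */
/* call further below), so the product of Jacobi rotations is accumulated */
/* explicitly and no triangular back-substitution is needed to recover the */
/* right singular vectors. */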
  1811. i__1 = nr;
  1812. for (p = 1; p <= i__1; ++p) {
  1813. i__2 = *n - p + 1;
  1814. _starpu_dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
  1815. c__1);
  1816. /* L7968: */
  1817. }
  1818. if (l2pert) {
  1819. xsc = sqrt(small / epsln);
  1820. i__1 = nr;
  1821. for (q = 1; q <= i__1; ++q) {
  1822. temp1 = xsc * (d__1 = v[q + q * v_dim1], abs(d__1));
  1823. i__2 = *n;
  1824. for (p = 1; p <= i__2; ++p) {
  1825. if (p > q && (d__1 = v[p + q * v_dim1], abs(d__1)) <=
  1826. temp1 || p < q) {
  1827. v[p + q * v_dim1] = d_sign(&temp1, &v[p + q *
  1828. v_dim1]);
  1829. }
  1830. if (p < q) {
  1831. v[p + q * v_dim1] = -v[p + q * v_dim1];
  1832. }
  1833. /* L5968: */
  1834. }
  1835. /* L5969: */
  1836. }
  1837. } else {
  1838. i__1 = nr - 1;
  1839. i__2 = nr - 1;
  1840. _starpu_dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
  1841. 1], ldv);
  1842. }
  1843. i__1 = *lwork - (*n << 1);
  1844. _starpu_dgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n << 1)
  1845. + 1], &i__1, &ierr);
  1846. _starpu_dlacpy_("L", n, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1], n);
  1847. i__1 = nr;
  1848. for (p = 1; p <= i__1; ++p) {
  1849. i__2 = nr - p + 1;
  1850. _starpu_dcopy_(&i__2, &v[p + p * v_dim1], ldv, &u[p + p * u_dim1], &
  1851. c__1);
  1852. /* L7969: */
  1853. }
  1854. if (l2pert) {
  1855. xsc = sqrt(small / epsln);
  1856. i__1 = nr;
  1857. for (q = 2; q <= i__1; ++q) {
  1858. i__2 = q - 1;
  1859. for (p = 1; p <= i__2; ++p) {
  1860. /* Computing MIN */
  1861. d__3 = (d__1 = u[p + p * u_dim1], abs(d__1)), d__4 = (
  1862. d__2 = u[q + q * u_dim1], abs(d__2));
  1863. temp1 = xsc * min(d__3,d__4);
  1864. u[p + q * u_dim1] = -d_sign(&temp1, &u[q + p * u_dim1]
  1865. );
  1866. /* L9971: */
  1867. }
  1868. /* L9970: */
  1869. }
  1870. } else {
  1871. i__1 = nr - 1;
  1872. i__2 = nr - 1;
  1873. _starpu_dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) +
  1874. 1], ldu);
  1875. }
  1876. i__1 = *lwork - (*n << 1) - *n * nr;
  1877. _starpu_dgesvj_("G", "U", "V", &nr, &nr, &u[u_offset], ldu, &sva[1], n, &
  1878. v[v_offset], ldv, &work[(*n << 1) + *n * nr + 1], &i__1,
  1879. info);
  1880. scalem = work[(*n << 1) + *n * nr + 1];
  1881. numrank = i_dnnt(&work[(*n << 1) + *n * nr + 2]);
  1882. if (nr < *n) {
  1883. i__1 = *n - nr;
  1884. _starpu_dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
  1885. ldv);
  1886. i__1 = *n - nr;
  1887. _starpu_dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
  1888. + 1], ldv);
  1889. i__1 = *n - nr;
  1890. i__2 = *n - nr;
  1891. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
  1892. 1) * v_dim1], ldv);
  1893. }
  1894. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  1895. _starpu_dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &work[*n +
  1896. 1], &v[v_offset], ldv, &work[(*n << 1) + *n * nr + nr + 1]
  1897. , &i__1, &ierr);
  1898. /* Permute the rows of V using the (column) permutation from the */
  1899. /* first QRF. Also, scale the columns to make them unit in */
  1900. /* Euclidean norm. This applies to all cases. */
  1901. temp1 = sqrt((doublereal) (*n)) * epsln;
  1902. i__1 = *n;
  1903. for (q = 1; q <= i__1; ++q) {
  1904. i__2 = *n;
  1905. for (p = 1; p <= i__2; ++p) {
  1906. work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
  1907. v_dim1];
  1908. /* L8972: */
  1909. }
  1910. i__2 = *n;
  1911. for (p = 1; p <= i__2; ++p) {
  1912. v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p];
  1913. /* L8973: */
  1914. }
  1915. xsc = 1. / _starpu_dnrm2_(n, &v[q * v_dim1 + 1], &c__1);
  1916. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  1917. _starpu_dscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
  1918. }
  1919. /* L7972: */
  1920. }
  1921. /* At this moment, V contains the right singular vectors of A. */
  1922. /* Next, assemble the left singular vector matrix U (M x N). */
  1923. if (*n < *m) {
  1924. i__1 = *m - *n;
  1925. _starpu_dlaset_("A", &i__1, n, &c_b34, &c_b34, &u[nr + 1 + u_dim1],
  1926. ldu);
  1927. if (*n < n1) {
  1928. i__1 = n1 - *n;
  1929. _starpu_dlaset_("A", n, &i__1, &c_b34, &c_b34, &u[(*n + 1) *
  1930. u_dim1 + 1], ldu);
  1931. i__1 = *m - *n;
  1932. i__2 = n1 - *n;
  1933. _starpu_dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (*
  1934. n + 1) * u_dim1], ldu);
  1935. }
  1936. }
  1937. i__1 = *lwork - *n;
  1938. _starpu_dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &
  1939. u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
  1940. if (rowpiv) {
  1941. i__1 = *m - 1;
  1942. _starpu_dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1)
  1943. + 1], &c_n1);
  1944. }
  1945. }
  1946. if (transp) {
  1947. /* .. swap U and V because the procedure worked on A^t */
  1948. i__1 = *n;
  1949. for (p = 1; p <= i__1; ++p) {
  1950. _starpu_dswap_(n, &u[p * u_dim1 + 1], &c__1, &v[p * v_dim1 + 1], &
  1951. c__1);
  1952. /* L6974: */
  1953. }
  1954. }
  1955. }
  1956. /* end of the full SVD */
  1957. /* Undo scaling, if necessary (and possible) */
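/* .. the rescaling of SVA by USCAL2/USCAL1 is applied only when */
/* SVA(1)*USCAL2/USCAL1 cannot overflow, i.e. when USCAL2 <= (BIG/SVA(1))*USCAL1; */
/* otherwise the factored representation is kept and returned via WORK(1:2). */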
  1958. if (uscal2 <= big / sva[1] * uscal1) {
  1959. _starpu_dlascl_("G", &c__0, &c__0, &uscal1, &uscal2, &nr, &c__1, &sva[1], n, &
  1960. ierr);
  1961. uscal1 = 1.;
  1962. uscal2 = 1.;
  1963. }
  1964. if (nr < *n) {
  1965. i__1 = *n;
  1966. for (p = nr + 1; p <= i__1; ++p) {
  1967. sva[p] = 0.;
  1968. /* L3004: */
  1969. }
  1970. }
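/* .. output packing: the computed singular values are */
/* (WORK(1)/WORK(2)) * SVA(i), with WORK(1) = USCAL2*SCALEM and */
/* WORK(2) = USCAL1; WORK(3) carries SCONDA when the condition estimate was */
/* requested, WORK(4:5) the estimates CONDR1, CONDR2 when both sets of */
/* singular vectors were computed, and WORK(6:7) the entropies from the */
/* transpose test when L2TRAN. IWORK(1:2) hold the numerical ranks from the */
/* pivoted QRF and from the Jacobi stage, and IWORK(3) the warning flag set */
/* earlier in the routine. */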
  1971. work[1] = uscal2 * scalem;
  1972. work[2] = uscal1;
  1973. if (errest) {
  1974. work[3] = sconda;
  1975. }
  1976. if (lsvec && rsvec) {
  1977. work[4] = condr1;
  1978. work[5] = condr2;
  1979. }
  1980. if (l2tran) {
  1981. work[6] = entra;
  1982. work[7] = entrat;
  1983. }
  1984. iwork[1] = nr;
  1985. iwork[2] = numrank;
  1986. iwork[3] = warning;
  1987. return 0;
  1988. /* .. */
  1989. /* .. END OF DGEJSV */
  1990. /* .. */
  1991. } /* _starpu_dgejsv_ */