zher.c
/* zher.f -- translated by f2c (version 20061008).
   You must link the resulting object file with libf2c:
        on Microsoft Windows system, link with libf2c.lib;
        on Linux or Unix systems, link with .../path/to/libf2c.a -lm
        or, if you install libf2c.a in a standard place, with -lf2c -lm
        -- in that order, at the end of the command line, as in
                cc *.o -lf2c -lm
   Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
        http://www.netlib.org/f2c/libf2c.zip
*/
#include "f2c.h"
#include "blaswrap.h"

/* Subroutine */ int zher_(char *uplo, integer *n, doublereal *alpha,
        doublecomplex *x, integer *incx, doublecomplex *a, integer *lda)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
    doublereal d__1;
    doublecomplex z__1, z__2;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    integer i__, j, ix, jx, kx, info;
    doublecomplex temp;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int xerbla_(char *, integer *);
/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZHER   performs the hermitian rank 1 operation */

/*     A := alpha*x*conjg( x' ) + A, */

/*  where alpha is a real scalar, x is an n element vector and A is an */
/*  n by n hermitian matrix. */

/*  Arguments */
/*  ========== */

/*  UPLO   - CHARACTER*1. */
/*           On entry, UPLO specifies whether the upper or lower */
/*           triangular part of the array A is to be referenced as */
/*           follows: */

/*              UPLO = 'U' or 'u'   Only the upper triangular part of A */
/*                                  is to be referenced. */

/*              UPLO = 'L' or 'l'   Only the lower triangular part of A */
/*                                  is to be referenced. */

/*           Unchanged on exit. */

/*  N      - INTEGER. */
/*           On entry, N specifies the order of the matrix A. */
/*           N must be at least zero. */
/*           Unchanged on exit. */

/*  ALPHA  - DOUBLE PRECISION. */
/*           On entry, ALPHA specifies the scalar alpha. */
/*           Unchanged on exit. */

/*  X      - COMPLEX*16 array of dimension at least */
/*           ( 1 + ( n - 1 )*abs( INCX ) ). */
/*           Before entry, the incremented array X must contain the n */
/*           element vector x. */
/*           Unchanged on exit. */

/*  INCX   - INTEGER. */
/*           On entry, INCX specifies the increment for the elements of */
/*           X. INCX must not be zero. */
/*           Unchanged on exit. */

/*  A      - COMPLEX*16 array of DIMENSION ( LDA, n ). */
/*           Before entry with UPLO = 'U' or 'u', the leading n by n */
/*           upper triangular part of the array A must contain the upper */
/*           triangular part of the hermitian matrix and the strictly */
/*           lower triangular part of A is not referenced. On exit, the */
/*           upper triangular part of the array A is overwritten by the */
/*           upper triangular part of the updated matrix. */
/*           Before entry with UPLO = 'L' or 'l', the leading n by n */
/*           lower triangular part of the array A must contain the lower */
/*           triangular part of the hermitian matrix and the strictly */
/*           upper triangular part of A is not referenced. On exit, the */
/*           lower triangular part of the array A is overwritten by the */
/*           lower triangular part of the updated matrix. */
/*           Note that the imaginary parts of the diagonal elements need */
/*           not be set, they are assumed to be zero, and on exit they */
/*           are set to zero. */

/*  LDA    - INTEGER. */
/*           On entry, LDA specifies the first dimension of A as declared */
/*           in the calling (sub) program. LDA must be at least */
/*           max( 1, n ). */
/*           Unchanged on exit. */

/*  Level 2 Blas routine. */

/*  -- Written on 22-October-1986. */
/*     Jack Dongarra, Argonne National Lab. */
/*     Jeremy Du Croz, Nag Central Office. */
/*     Sven Hammarling, Nag Central Office. */
/*     Richard Hanson, Sandia National Labs. */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
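/*  Example (an illustrative sketch added to this listing, not part of the */
/*  reference comment above): with a column-major LDA-by-n doublecomplex */
/*  array a and an n-element vector x, a typical call that updates the */
/*  upper triangle in place would look like */

/*      integer n = 4, incx = 1, lda = 4; */
/*      doublereal alpha = 2.; */
/*      zher_("U", &n, &alpha, x, &incx, a, &lda); */

/*  after which the upper triangle of a holds alpha*x*conjg( x' ) + A. */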
/*     Test the input parameters. */

    /* Parameter adjustments */
    --x;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;

    /* Function Body */
    info = 0;
    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
        info = 1;
    } else if (*n < 0) {
        info = 2;
    } else if (*incx == 0) {
        info = 5;
    } else if (*lda < max(1,*n)) {
        info = 7;
    }
    if (info != 0) {
        xerbla_("ZHER ", &info);
        return 0;
    }
/*     Quick return if possible. */

    if (*n == 0 || *alpha == 0.) {
        return 0;
    }

/*     Set the start point in X if the increment is not unity. */

    if (*incx <= 0) {
        kx = 1 - (*n - 1) * *incx;
    } else if (*incx != 1) {
        kx = 1;
    }
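/*     (With a negative increment the vector is stored in reverse: e.g. for */
/*     n = 4 and incx = -2, kx = 1 - 3*(-2) = 7, so x(1)..x(4) are read from */
/*     X(7), X(5), X(3), X(1) as jx steps by incx from kx.) */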
/*     Start the operations. In this version the elements of A are */
/*     accessed sequentially with one pass through the triangular part */
/*     of A. */

    if (lsame_(uplo, "U")) {

/*        Form  A  when A is stored in upper triangle. */

        if (*incx == 1) {
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                i__2 = j;
                if (x[i__2].r != 0. || x[i__2].i != 0.) {
                    d_cnjg(&z__2, &x[j]);
                    z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
                    temp.r = z__1.r, temp.i = z__1.i;
                    i__2 = j - 1;
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        i__3 = i__ + j * a_dim1;
                        i__4 = i__ + j * a_dim1;
                        i__5 = i__;
                        z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
                                z__2.i = x[i__5].r * temp.i + x[i__5].i * temp.r;
                        z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
                        a[i__3].r = z__1.r, a[i__3].i = z__1.i;
/* L10: */
                    }
                    i__2 = j + j * a_dim1;
                    i__3 = j + j * a_dim1;
                    i__4 = j;
                    z__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i,
                            z__1.i = x[i__4].r * temp.i + x[i__4].i * temp.r;
                    d__1 = a[i__3].r + z__1.r;
                    a[i__2].r = d__1, a[i__2].i = 0.;
                } else {
                    i__2 = j + j * a_dim1;
                    i__3 = j + j * a_dim1;
                    d__1 = a[i__3].r;
                    a[i__2].r = d__1, a[i__2].i = 0.;
                }
/* L20: */
            }
        } else {
            jx = kx;
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                i__2 = jx;
                if (x[i__2].r != 0. || x[i__2].i != 0.) {
                    d_cnjg(&z__2, &x[jx]);
                    z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
                    temp.r = z__1.r, temp.i = z__1.i;
                    ix = kx;
                    i__2 = j - 1;
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        i__3 = i__ + j * a_dim1;
                        i__4 = i__ + j * a_dim1;
                        i__5 = ix;
                        z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
                                z__2.i = x[i__5].r * temp.i + x[i__5].i * temp.r;
                        z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
                        a[i__3].r = z__1.r, a[i__3].i = z__1.i;
                        ix += *incx;
/* L30: */
                    }
                    i__2 = j + j * a_dim1;
                    i__3 = j + j * a_dim1;
                    i__4 = jx;
                    z__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i,
                            z__1.i = x[i__4].r * temp.i + x[i__4].i * temp.r;
                    d__1 = a[i__3].r + z__1.r;
                    a[i__2].r = d__1, a[i__2].i = 0.;
                } else {
                    i__2 = j + j * a_dim1;
                    i__3 = j + j * a_dim1;
                    d__1 = a[i__3].r;
                    a[i__2].r = d__1, a[i__2].i = 0.;
                }
                jx += *incx;
/* L40: */
            }
        }
    } else {

/*        Form  A  when A is stored in lower triangle. */

        if (*incx == 1) {
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                i__2 = j;
                if (x[i__2].r != 0. || x[i__2].i != 0.) {
                    d_cnjg(&z__2, &x[j]);
                    z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
                    temp.r = z__1.r, temp.i = z__1.i;
                    i__2 = j + j * a_dim1;
                    i__3 = j + j * a_dim1;
                    i__4 = j;
                    z__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i,
                            z__1.i = temp.r * x[i__4].i + temp.i * x[i__4].r;
                    d__1 = a[i__3].r + z__1.r;
                    a[i__2].r = d__1, a[i__2].i = 0.;
                    i__2 = *n;
                    for (i__ = j + 1; i__ <= i__2; ++i__) {
                        i__3 = i__ + j * a_dim1;
                        i__4 = i__ + j * a_dim1;
                        i__5 = i__;
                        z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
                                z__2.i = x[i__5].r * temp.i + x[i__5].i * temp.r;
                        z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
                        a[i__3].r = z__1.r, a[i__3].i = z__1.i;
/* L50: */
                    }
                } else {
                    i__2 = j + j * a_dim1;
                    i__3 = j + j * a_dim1;
                    d__1 = a[i__3].r;
                    a[i__2].r = d__1, a[i__2].i = 0.;
                }
/* L60: */
            }
        } else {
            jx = kx;
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                i__2 = jx;
                if (x[i__2].r != 0. || x[i__2].i != 0.) {
                    d_cnjg(&z__2, &x[jx]);
                    z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
                    temp.r = z__1.r, temp.i = z__1.i;
                    i__2 = j + j * a_dim1;
                    i__3 = j + j * a_dim1;
                    i__4 = jx;
                    z__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i,
                            z__1.i = temp.r * x[i__4].i + temp.i * x[i__4].r;
                    d__1 = a[i__3].r + z__1.r;
                    a[i__2].r = d__1, a[i__2].i = 0.;
                    ix = jx;
                    i__2 = *n;
                    for (i__ = j + 1; i__ <= i__2; ++i__) {
                        ix += *incx;
                        i__3 = i__ + j * a_dim1;
                        i__4 = i__ + j * a_dim1;
                        i__5 = ix;
                        z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
                                z__2.i = x[i__5].r * temp.i + x[i__5].i * temp.r;
                        z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
                        a[i__3].r = z__1.r, a[i__3].i = z__1.i;
/* L70: */
                    }
                } else {
                    i__2 = j + j * a_dim1;
                    i__3 = j + j * a_dim1;
                    d__1 = a[i__3].r;
                    a[i__2].r = d__1, a[i__2].i = 0.;
                }
                jx += *incx;
/* L80: */
            }
        }
    }
    return 0;

/*     End of ZHER  . */

} /* zher_ */
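/* ------------------------------------------------------------------ */
/* Illustrative driver (a minimal sketch, not part of the translated   */
/* routine): it relies only on the f2c typedefs already pulled in from */
/* "f2c.h" (integer, doublereal, doublecomplex with .r/.i fields) and  */
/* on a link line as described in the banner at the top of this file.  */
/* It is guarded behind an assumed ZHER_EXAMPLE_MAIN macro so the      */
/* library object is unchanged unless that macro is defined. It        */
/* applies a rank-1 update to a 2 x 2 identity stored column-major and */
/* prints the referenced upper triangle.                               */
#ifdef ZHER_EXAMPLE_MAIN
#include <stdio.h>

int main(void)
{
    integer n = 2, incx = 1, lda = 2;
    doublereal alpha = 1.;
    doublecomplex x[2] = { {1., 0.}, {0., 1.} };      /* x = (1, i) */
    doublecomplex a[4] = { {1., 0.}, {0., 0.},
                           {0., 0.}, {1., 0.} };      /* A = I, column-major */
    integer i, j;

    zher_("U", &n, &alpha, x, &incx, a, &lda);

    /* Expected upper triangle: A(1,1) = 2, A(1,2) = -i, A(2,2) = 2. */
    for (j = 0; j < n; ++j) {
        for (i = 0; i <= j; ++i) {
            printf("A(%ld,%ld) = %g%+gi\n", (long)(i + 1), (long)(j + 1),
                   a[i + j * lda].r, a[i + j * lda].i);
        }
    }
    return 0;
}
#endif /* ZHER_EXAMPLE_MAIN */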