\documentclass[twoside]{article} \usepackage{amssymb} % font used for R in Real numbers \pagestyle{myheadings} \markboth{\hfil Ginzburg-Landau equation\hfil EJDE--2000/42} {EJDE--2000/42\hfil Tai-Chia Lin \hfil} \begin{document} \title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent {\sc Electronic Journal of Differential Equations}, Vol.~{\bf 2000}(2000), No.~42, pp.~1--25. \newline ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu \newline ftp ejde.math.swt.edu \quad ftp ejde.math.unt.edu (login: ftp)} \vspace{\bigskipamount} \\ % Spectrum of the linearized operator for the Ginzburg-Landau equation \thanks{ {\em Mathematics Subject Classifications:} 35P15, 35K55, 35Q55. \hfil\break\indent {\em Key words:} Ginzburg-Landau equation, spectrum, vortex dynamics, superfluid. \hfil\break\indent \copyright 2000 Southwest Texas State University and University of North Texas. \hfil\break\indent Submitted May 1, 2000. Published June 9, 2000.} } \date{} % \author{ Tai-Chia Lin } \maketitle \begin{abstract} We study the spectrum of the linearized operator for the Ginzburg-Landau equation about a symmetric vortex solution with degree one. We show that the smallest eigenvalue of the linearized operator has multiplicity two, and then we describe its behavior as a small parameter approaches zero. We also find a positive lower bound for all the other eigenvalues, and find estimates of the first eigenfunction. Then using these results, we give partial results on the dynamics of vortices in the nonlinear heat and Schr\"{o}dinger equations. \end{abstract} \renewcommand{\theequation}{\thesection.\arabic{equation}} \section{Introduction} We consider the steady state for the Ginzburg-Landau equation \begin{equation} \label{1.1} \Delta u + \frac{1}{\epsilon^2} u (1-|u|^2) = 0 \quad\hbox{for } x \in {\mathbb R}^2\,, \\ \end{equation} where the solution $u$ is a complex-valued function and $\epsilon$ is a small positive parameter. Symmetric vortex solution to (\ref{1.1}) with degree one has been obtained in \cite{CET,H, HH}. The solution have the form $$ u(x)=U_1(x)=f_0(\frac{r}{\epsilon}) e^{i \theta}\,, $$ where $(r, \theta)$ denote the polar coordinates of $x \in {\mathbb R}^2$ and $f_0=f_0(s)$ is the solution of \begin{equation} \label{1.2} \begin{array}{c} -f'' - \frac{f'}{s} + \frac{1}{s^2} f = f \cdot (1-f^2) \quad \hbox{ for } s > 0\,, \\[6pt] f(+\infty) =1,\quad f(0) =0, \quad f\geq 0\,. \end{array} \end{equation} Moreover, $f_0(s)$ satisfies \begin{eqnarray} \label{1.3} f_0(s)&=& s (A_0 + \sum_{k=1}^\infty P_{2k}(A_0) s^{2k}) \quad\hbox{for } s > 0\,, \\ \label{1.4} f_0(s)&=& 1- \frac{1}{2s^2}- \frac{9}{8 s^4}+ \dots \quad\hbox{as } s\to +\infty\,, \end{eqnarray} where $A_0$ and the $P_{2k}(A_0)$ are constants with $A_0$ positive. We consider a small perturbation about $U_1$ of the form $$ u(x) = U_1(x) + \epsilon v(x), $$ where $v$ is a smooth function, $v(x) = 0$ for $x \in \partial B $ and $B=B_1(0)$ is the unit disk in ${\mathbb R}^2$ centered at the origin. 
Then (\ref{1.1}) and (\ref{1.2}) imply $$\displaylines{ L_\epsilon v + N(v) = 0 \quad\hbox{for } x\in B\,,\cr v= 0 \quad\hbox{for } x\in \partial B\,, }$$ where \begin{eqnarray} &L_\epsilon v= - \Delta v - \frac{1}{\epsilon^2} (1- |U_1|^2)v + \frac{2}{\epsilon^2}(U_1 \cdot v)U_1,& \nonumber\\ &U_1 \cdot v = \frac{1}{2}(\bar{U_1}v + U_1\bar{v}),& \label{5.2} \\ &N(v)= \frac{1}{\epsilon}[ 2(U_1 \cdot v) v + |v|^2 U_1 ] + |v|^2 v\,.& \nonumber \end{eqnarray} Here the operator $L_\epsilon : H^1_0(B ; {\mathbb C})\cap H^2(B ; {\mathbb C}) \to L^{2}(B ; {\mathbb C})$ is the linearized operator of (\ref{1.1}) about $U_1$. In this paper, we find estimates for the eigenvalues and the first eigenfunction of the operator $L_\epsilon$. Since $L_\epsilon$ is self-adjoint, then all eigenvalues of $L_\epsilon$ must be real. Hence the eigenvalue problem becomes \begin{equation}\label{5.1} L_\epsilon w = \lambda w,\quad \lambda\in{\mathbb R},\quad w\in H^1_0(B ; {\mathbb C})\cap H^2(B ; {\mathbb C}). \end{equation} Lieb and Loss~\cite{LL} proved that the first eigenvalue on (\ref{5.1}) is nonnegative. Later, Mironescu~\cite{M} showed that \paragraph{Theorem~A:} {\it The first eigenvalue of $L_\epsilon$ is positive.}\medskip \noindent In \cite{L}, we find quantitative estimates for (\ref{5.1}), such as the following. \paragraph{Theorem~B:} {\it Let $V_1 = \{ a(r) + b(r) e^{2i \theta} \in H^1_0 (B; {\mathbb C} )\cap H^2 (B; {\mathbb C} ) \}$. Then \begin{description} \item{{\rm (i)}} There exist positive constants $c_1, \epsilon_1$ independent of $\epsilon$ such that $$ \langle L_\epsilon w\,, w \rangle \geq c_1 > 0\quad\hbox{ for } w\in V_1^\perp, \| w\|_{L^2(B)}=1\,, 0<\epsilon\leq\epsilon_1\,, $$ where $V_1^\perp = \{ w\in H^1_0 (B; {\mathbb C} )\cap H^2 (B; {\mathbb C} ) : \langle w,v\rangle=0\,,\forall v\in V_1\}$ and $\langle\cdot, \cdot\rangle$ is the inner product in $L^2(B)$, \item{{\rm (ii)}} $0<\lambda_1\to 0$ as $\epsilon\to 0+$. \item{{\rm(iii)}} The first eigenfunction corresponding to the first eigenvalue $\lambda_1$ has the form $a_\epsilon(r)+b_\epsilon(r) e^{2i \theta}$ such that $a_\epsilon\geq b_\epsilon\geq 0$ for $0\leq r\leq 1$. \end{description} }\medskip The proof of Theorem~B(iii) can be found in \cite{M}. Note that Theorem~B estimates many eigenvalues but not all eigenvalues. In addition, the multiplicity of the first eigenvalue $\lambda_1$ is still unknown. Furthermore, there is no estimate about the first eigenfunction corresponding to the first eigenvalue $\lambda_1$. We improve Theorem~B by our main results as follows. \paragraph{Theorem I:} {\it \begin{description} \item{{\rm (i)}} The eigenspace of the eigenvalue $\lambda_1$ is only two-dimensional which is spanned by two eigenfunctions $a_\epsilon(r) + b_\epsilon(r) e^{2i \theta}$ and $i a_\epsilon(r) - i b_\epsilon(r) e^{2i \theta}$, where $a_\epsilon(r)$ and $b_\epsilon(r)$ are real-valued. \item{{\rm (ii)}} There exist positive constants $c_1, \epsilon_1$ independent of $\epsilon$ such that the second eigenvalue $\lambda_2$ satisfies $\lambda_2(\epsilon) \geq c_1$ for $0 < \epsilon \leq \epsilon_1$. \item{{\rm (iii)}} $0 < \lambda_1 = O((\log\frac{1}{\epsilon})^{-1})$ as $\epsilon\to 0+$. \end{description}} >From (\ref{5.2}), $L_\epsilon w = \lambda_1 w$, $\lambda_1\in {\mathbb R}$, $w\in V_1$ if and only if $w= \alpha(a(r)+b(r) e^{2i \theta})+\beta(ic(r)+id(r)e^{2i \theta})$, for all $\alpha,\beta\in {\mathbb R}$, where $a,b,c,d$ are real-valued constants. 
However, by (\ref{5.2}), the eigenfunction in $V_1$ cannot have the form $a(r)+ib(r)e^{2i \theta}$ or $ia(r)+b(r)e^{2i \theta}$, where $a,b$ are nonzero real-valued. Moreover, by (\ref{5.2}), $w=a(r)+b(r) e^{2i \theta}\in V_1$, where $a, b$ are real-valued satisfies $L_\epsilon w = \lambda_1 w$ if and only if $\tilde{w}=ia(r)-ib(r) e^{2i \theta}\in V_1$ also satisfies $L_\epsilon \tilde{w} = \lambda_1 \tilde{w}$. Hence the eigenfunctions of $L_\epsilon$ in $V_1$ can be generated by all functions with the specific forms $a(r)+b(r)e^{2i \theta}$ and $ia(r)-ib(r)e^{2i \theta}$, where $a, b$ are real. Therefore the first eigenfunction has the specific form in Theorem~I(i). Now we introduce the stretched variable $X=x/\epsilon$. Then we transform the operator $L_\epsilon$ into another linear operator $\tilde{L}_\epsilon$ defined by \begin{equation}\label{5.3} \tilde{L_\epsilon} \tilde{v}(X)= -\Delta_X \tilde{v} - (1-|\Psi_0|^2)\tilde{v}+ 2(\Psi_0\cdot \tilde{v})\Psi_0 \end{equation} for $X\in B_{1/\epsilon}(0), \tilde{v} =\tilde{v}(X)\in H_0^1(B_{1/\epsilon}(0)) \cap H^2(B_{1/\epsilon}(0))$, where $\Psi_0(X)= f_0(s) e^{i \theta}$, $s=|X|$, $\theta= \arg X$ and $\Delta= \partial_{X_1}^2+\partial_{X_2}^2$. Then $\tilde{L}_\epsilon \tilde{v}=\epsilon^2 \lambda_1 \tilde{v}$ if and only if $L_\epsilon v = \lambda_1 v$, where $\tilde{v}(X)=v(\epsilon X)$ for $X\in B_{1/\epsilon}(0)$. Hence Theorem I(i) implies that $\tilde{L}_\epsilon$ has the first eigenvalue $\epsilon^2 \lambda_1$ and the associated eigenfunctions $$ \tilde{e}_{1}(s,\theta)= \tilde{a}_{\epsilon}(s)+\tilde{b}_{\epsilon}(s) e^{2i \theta}\,,\quad \tilde{e}_{2}(s,\theta)= i \tilde{a}_{\epsilon}(s)-i \tilde{b}_{\epsilon} (s) e^{2i \theta}\,, $$ where $\tilde{a}_\epsilon(s)= a_\epsilon(\epsilon s)$ and $\tilde{b}_\epsilon(s)= b_\epsilon(\epsilon s)$. We may assume that $\| \tilde{e}_{j}\|_{L^2} = 1\,, j= 1, 2$. Here $\|\cdot\|_{L^2}$ denotes the $L^2$ norm on $B_{1/\epsilon}(0)$. The estimates of $\tilde{e}_j$'s are given as follows. \paragraph{Proposition I:} {\it Let $\tilde{w}_j= \frac{1}{\| \partial_{X_j} \Psi_0\|_{L^2}} \partial_{X_j}\Psi_0$, $j=1,2$. Assume that $\langle\tilde{w}_j\,,\tilde{e}_{j}\rangle > 0$ for $j=1,2$. Then the eigenfunctions $\tilde{e}_{j}$ satisfy $$ \tilde{e}_{j}= \tilde{w}_j+ \nu_{j\,,\epsilon}\quad\hbox{and}\quad \| \nu_{j\,,\epsilon}\|_{L^2} = O((\log \frac{1}{\epsilon})^{-1/2})\quad\hbox{as } \epsilon\to +\,. $$ } Theorem~I and Proposition~I are important tools for analyzing the vortex solution of the Ginzburg-Landau equation \begin{eqnarray} \label{5.4} &\epsilon^2\Delta u+(1-|u|^2)u = 0 \quad\hbox{ in } \Omega\,,& \\ &u = g \quad\hbox{ on } \partial\Omega\,,&\nonumber \end{eqnarray} where $\Omega$ is a bounded smooth domain, $u:\Omega\to {\mathbb C}$ is the solution and $g:\partial\Omega\to S^1$ is smooth with degree $d\geq 1$. Some important results on the vortex solution of (\ref{5.4}) are presented in \cite{BBH}; however, the structure of all vortex solutions in (\ref{5.4}) is still unknown. Basically, (\ref{5.4}) is one of singular perturbation problems for which by the Liapunov-Schmidt method, it is quite possible to obtain a smooth solution of (\ref{5.4}) with $d$ degree-one vortices. One may use the symmetric vortex solution $U_1$ to set up an approximated solution with $d$ degree-one vortices. However, the spectrum of the linearized operator $L_\epsilon$ is essential in the Liapunov-Schmidt method. 
Therefore Theorem~I and Proposition~I become important for studying the vortex solutions in (\ref{5.4}). This paper is organized as follows. In Section~2, we prove Theorem~I and Proposition~I. In Section~3 and 4, we give a partial proof for the dynamics of vortices in nonlinear heat and Schr\"{o}dinger equations. This is an another application of Theorem~I and Proposition~I. \section{Proof of Theorem~I and Proposition~I} \setcounter{equation}{0} >From Theorem~B(iii), we assume that $w_\epsilon = a_{1,\epsilon}(r) + b_{1,\epsilon}(r)e^{2i\theta}$ is the first eigenfunction of $L_\epsilon$, where $a_{1,\epsilon}, b_{1,\epsilon}$ are real. Then $L_\epsilon w_\epsilon = \lambda_1 w_\epsilon$ becomes the system of ordinary differential equations as follows. \begin{eqnarray} &-a_{1,\epsilon}''-\frac{1}{r}a_{1,\epsilon}' =\frac{1}{\epsilon^2}(1-2f_{\epsilon}^2)a_{1,\epsilon} - \frac{1}{\epsilon^2}f_{\epsilon}^2 b_{1,\epsilon} + \lambda_1 a_{1,\epsilon}\quad \hbox{for } r\in (0, 1)\,,& \label{2.1} \\ &-b_{1,\epsilon}''-\frac{1}{r}b_{1,\epsilon}'+\frac{4}{r^2}b_{1,\epsilon} =\frac{1}{\epsilon^2}(1-2f_{\epsilon}^2)b_{1,\epsilon} - \frac{1}{\epsilon^2}f_{\epsilon}^2 a_{1,\epsilon} + \lambda_1 b_{1,\epsilon} \quad\hbox{for } r\in (0, 1)\,,& \nonumber \\ &a_{1,\epsilon}(1)=b_{1,\epsilon}(1)=0\,,& \nonumber \end{eqnarray} where $f_{\epsilon}(r)=f_0(\frac{r}{\epsilon})$. >From \cite{M1}, $a_{1,\epsilon}(r), b_{1,\epsilon}(r)$ are real analytic for $0 \leq r \leq 1$. Hence \begin{equation}\label{2.2} a_{1,\epsilon}(r) = \sum_{k=0}^\infty \alpha_k r^k\,, \quad b_{1,\epsilon}(r) = \sum_{k=0}^\infty \beta_k r^k\,,\quad \hbox{ for } 0\leq r\leq 1\,, \end{equation} where $\alpha_k, \beta_k \in{\mathbb R} $ are constants. By (\ref{1.3}), we have \begin{equation}\label{2.3} f_{\epsilon}^2(r)= \tilde{A}_\epsilon r^2 + \sum_{k=1}^\infty \tilde{P}_{2k+2}(\tilde{A}_\epsilon) r^{2k+2} \end{equation} for $0\leq r\leq 1$, where $\tilde{A}_\epsilon = (A_0/\epsilon)^2 > 0$ and $\tilde{P}_{2k+2}(\tilde{A}_\epsilon)$'s are constants depending on $\tilde{A}_\epsilon$. Taking (\ref{2.2}) and (\ref{2.3}) into (\ref{2.1}), and comparing the coefficients of $r^k$'s, we obtain that \begin{eqnarray*} \alpha_{2k+1} &=& \beta_{2k+1} = 0\,, \\ \alpha_{2k+2} &=& -\frac{\frac{1}{\epsilon^2}+\lambda_1}{4(k+1)^2}\alpha_{2k} + \frac{\tilde{A}_\epsilon}{4(k+1)^2}(2\alpha_{2k-2}+\beta_{2k-2}) \\ & &+ \frac{1}{4(k+1)^2} \sum_{l=1}^{k-1} \tilde{P}_{2l+2}(\tilde{A}_\epsilon) (2\alpha_{2(k-l-1)}+\beta_{2(k-l-1)})\,, \\ \beta_{2k+2} &=& -\frac{\frac{1}{\epsilon^2}+\lambda_1}{4k(k+2)}\beta_{2k} + \frac{\tilde{A}_\epsilon}{4k(k+2)}(2\beta_{2k-2}+\alpha_{2k-2}) \\ & &+ \frac{1}{4k(k+2)} \sum_{l=1}^{k-1} \tilde{P}_{2l+2}(\tilde{A}_\epsilon) (2\beta_{2(k-l-1)}+\alpha_{2(k-l-1)})\,. \end{eqnarray*} Hence by induction, we have \begin{eqnarray} \label{2.4} a_{1,\epsilon}(r)&=&\alpha_0 \sum_{k=0}^\infty C_{2k} r^{2k} + \beta_2 \sum_{k=3}^\infty D_{2k} r^{2k}\,, \\ b_{1,\epsilon}(r)&=&\alpha_0 \sum_{k=2}^\infty E_{2k} r^{2k} + \beta_2\sum_{k=1}^\infty F_{2k} r^{2k}\,, \nonumber \end{eqnarray} for $0\leq r\leq 1$ , where $C_0=1$, $F_2=1$, $E_4= \frac{\tilde{A}_\epsilon}{12}$, $D_6= \frac{\tilde{A}_\epsilon}{36}$ and all the other $C_{2k}, D_{2k}, E_{2k}$ and $F_{2k}$'s depend only on $k, \epsilon, \lambda_1$ and $\tilde{A}_\epsilon$. Now we show the eigenspace of $\lambda_1$ is two dimensional. 
By (\ref{2.4}) and $a_{1,\epsilon}(1)=b_{1,\epsilon}(1)=0$, we obtain that $\alpha_0$ and $\beta_2$ must satisfy one of the following conditions: \begin{description} \item{(1)}~$\beta_2 = K_1 \alpha_0$ and $\alpha_0$ is any real number, where $K_1$ is a constant independent of $\alpha_0$. \item{(2)}~$\alpha_0=0$, $\beta_2$ is any real number. \item{(3)}~both $\alpha_0$ and $\beta_2$ are any real numbers. \end{description} Suppose (2) or (3) holds. Setting $\alpha_0 = 0, \beta_2 = 1$, then \begin{equation}\label{2.5} |a_{1,\epsilon}| < |b_{1,\epsilon}|\quad\hbox{ as } r\to 0+\,. \end{equation} However, Theorem~B(iii) implies that $|a_{1,\epsilon}| \geq |b_{1,\epsilon}|$ for $0\leq r \leq 1$. This is a contradiction with (\ref{2.5}). Hence only the case (1) holds. Thus the eigenfunction $w_\epsilon= a_{1,\epsilon}(r) + b_{1,\epsilon}(r)e^{2i \theta}$ satisfies \begin{eqnarray} \label{2.6} a_{1,\epsilon}(r)&=&\alpha_0(\sum_{k=0}^\infty C_{2k} r^{2k}+K_1 \sum_{k=3}^\infty D_{2k} r^{2k})\,, \\ b_{1,\epsilon}(r)&=&\alpha_0(\sum_{k=2}^\infty E_{2k} r^{2k}+ K_1\sum_{k=1}^\infty F_{2k} r^{2k})\,. \nonumber \end{eqnarray} >From (\ref{5.2}), it is easy to check that $L_\epsilon \hat{w}_\epsilon = \lambda_1 \hat{w}_\epsilon$, where $\hat{w}_\epsilon= i a_{1,\epsilon}(r)-i b_{1,\epsilon}(r) e^{2i \theta}$. Therefore the eigenspace of $\lambda_1$ is only two-dimensional, spanned by $a_{1,\epsilon}(r)+ b_{1,\epsilon}(r)e^{2i \theta}$ and $i a_{1,\epsilon}(r)- i b_{1,\epsilon}(r)e^{2i \theta}$, and we complete the proof of theorem I(i). Now we prove Theorem I(ii) by contradiction. Suppose the second eigenvalue $\lambda_2\to 0$ as $\epsilon\to 0+$. Let $w_{2,\epsilon}$ be the eigenfunction associated with $\lambda_2$. Then Theorem~B implies that $w_{2,\epsilon}\in V_1$. Hence by (\ref{5.2}), $L_\epsilon w_{2,\epsilon} = \lambda_2 w_{2,\epsilon} \in V_1$ and $\lambda_2\in{\mathbb R}$, we obtain that $w_{2,\epsilon}$ must have the form $w_{2,\epsilon}= a_{2,\epsilon}(r)+b_{2,\epsilon}(r) e^{2i \theta}$, where $a_{2,\epsilon}, b_{2,\epsilon}$ are real-valued. Thus the equations $L_\epsilon w_\epsilon = \lambda_1 w_\epsilon$ and $L_\epsilon w_{2,\epsilon} = \lambda_2 w_{2,\epsilon}$ become the systems of ordinary differntial equations as follows. \begin{eqnarray} &-a''_{j,\epsilon}-\frac{1}{r}a'_{j,\epsilon} = \frac{1}{\epsilon^2}(1-2f_0^2(\frac{r}{\epsilon}))a_{j,\epsilon} - \frac{1}{\epsilon^2}f_0^2(\frac{r}{\epsilon})b_{j,\epsilon} + \lambda_j a_{j,\epsilon}\,,& \nonumber \\ &-b''_{j,\epsilon}-\frac{1}{r}b'_{j,\epsilon}+\frac{4}{r^2} b_{j,\epsilon}= \frac{1}{\epsilon^2}(1-2f_0^2(\frac{r}{\epsilon}))b_{j,\epsilon} - \frac{1}{\epsilon^2}f_0^2(\frac{r}{\epsilon})a_{j,\epsilon} + \lambda_j b_{j,\epsilon}\,,& \nonumber\\ &a_{j,\epsilon}(1) = b_{j,\epsilon}(1) = 0\,,&\label{2.12} \end{eqnarray} for $r\in (0, 1]\,,j=1, 2$. Now we assume that $r = \epsilon s, a_{j, \epsilon} (s) = a_{j, \epsilon} (\epsilon s)$ and $b_{j, \epsilon} (s) = b_{j, \epsilon} (\epsilon s)\,.$ For the notation convenience, we use the same $a_{j\,,\epsilon}$ and $b_{j\,,\epsilon}$ after the scaling $r=\epsilon s$. 
Then (\ref{2.12}) implies \begin{eqnarray} &-a''_{j,\epsilon}-\frac{1}{s}a'_{j,\epsilon} = (1-2f_0^2(s))a_{j,\epsilon} - f_0^2(s)b_{j,\epsilon} + \lambda_j\epsilon^2 a_{j,\epsilon}\,,& \nonumber\\ &-b''_{j,\epsilon}-\frac{1}{s}b'_{j,\epsilon}+\frac{4}{s^2} b_{j,\epsilon}=(1-2f_0^2(s))b_{j,\epsilon} - f_0^2(s)a_{j,\epsilon} + \lambda_j\epsilon^2 b_{j,\epsilon}\,,& \nonumber\\ &a_{j,\epsilon}(\frac{1}{\epsilon}) = b_{j,\epsilon}(\frac{1}{\epsilon}) = 0\,,& \label{2.13} \end{eqnarray} for $s\in (0, \frac{1}{\epsilon}]\,,j=1, 2$. By \cite{M1}, we set $$ a_{j,\epsilon}(s) = \sum_{k=0}^\infty \alpha_{j,k} s^k\,, b_{j,\epsilon}(s) = \sum_{k=0}^\infty \beta_{j,k} s^k \,,\quad\hbox{ for } 0\leq s\leq\frac{1}{\epsilon}, j=1,2\,, $$ where $\alpha_{j,k}$'s and $\beta_{j,k}$'s are constants. >From (\ref{1.3}), $f_0(s)$ satisfies \begin{eqnarray} \label{2.7} f_0(s)&=&A_0 s + \sum_{k=1}^\infty P_{2k+1}(A_0) s^{2k+1}\,,\\ \label{2.8} f_0^2(s)&=&\tilde{A}_0 s^2 + \sum_{k=1}^\infty \tilde{P}_{2k+2}(\tilde{A}_0) s^{2k+2}\,, \end{eqnarray} for $s > 0$, where $\tilde{A}_0= A_0^2$ and $\tilde{P}_{2k+2}(\tilde{A}_0)$'s are constants depending on $\tilde{A}_0$. Moreover, (\ref{1.4}) implies \begin{eqnarray} \label{2.9} f_0(s)&=&1-\frac{1}{2s^2}-\frac{9}{8s^4}+ \dots \quad\hbox{as } s \to +\infty\,, \\ \label{2.10} f_0^2(s)&=&1-\frac{1}{s^2}-\frac{2}{s^4}+ \dots \quad\hbox{ as } s \to +\infty\,. \end{eqnarray} Then by (\ref{2.13}) and (\ref{2.8}), we obtain a similar formula to (\ref{2.4}) as follows. \begin{eqnarray}\label{2.14} a_{j, \epsilon}(s)&=&\alpha_{j, 0} \sum_{k=0}^\infty C_{j, 2k} s^{2k} + \beta_{j, 2} \sum_{k=3}^\infty D_{j, 2k} s^{2k}\,,\\[5pt] b_{j, \epsilon}(s)&=&\alpha_{j, 0} \sum_{k=2}^\infty E_{j, 2k} s^{2k} + \beta_{j, 2}\sum_{k=1}^\infty F_{j, 2k} s^{2k}\,,\quad 0\leq s\leq \frac{1}{\epsilon} \nonumber \end{eqnarray} where $C_{j, 0}=1$, $F_{j, 2}=1$, $E_{j, 4}= \frac{\tilde{A}_0}{12}$, $D_{j, 6}= \frac{\tilde{A}_0}{36}$ and all the other $C_{j, 2k}$, $D_{j, 2k}$, $E_{j, 2k}$ and $F_{j, 2k}$ are depending only on $\epsilon, \lambda_j$ and $\tilde{A}_0$. >From (\ref{2.6}), we have $\beta_{1,2} = \tilde{K}_1\alpha_{1,0}$, where $\tilde{K}_1$ is a constant independent of $\alpha_{1,0}$. Hence by Theorem~B(ii), (\ref{2.13}), (\ref{2.14}) and the standard ordinary differential equation theorem, we have \begin{equation}\label{2.15} \begin{array}{c} a_{1,\epsilon}(s) = \alpha_{1,0}[(a_0(s)+\lambda_1\epsilon^2 \hat{a}_{1,\epsilon}(s)) + \tilde{K}_1(a_1(s)+\lambda_1\epsilon^2 \breve{a}_{1,\epsilon}(s))]\,, \\ b_{1,\epsilon}(s) = \alpha_{1,0}[(b_0(s)+\lambda_1\epsilon^2 \hat{b}_{1,\epsilon}(s)) + \tilde{K}_1(b_1(s)+\lambda_1\epsilon^2 \breve{b}_{1,\epsilon}(s))]\,, \end{array} \end{equation} where $a_0(s) = O(1)$, $\hat{a}_{1,\epsilon}(s) = O(s^2)$, $a_1(s) = O(s^6)$, $\breve{a}_{1,\epsilon}(s) = O(s^8)$, $b_0(s) = O(s^4)$, $\hat{b}_{1,\epsilon}(s) = O(s^6)$, $b_1(s) = O(s^2)$, $\breve{b}_{1,\epsilon}(s) = O(s^4)$ as $s\to 0+$, and $(a_j(s)\,,b_j(s))$'s are solutions of \begin{equation}\label{2.16} \begin{array}{c} -a''-\frac{1}{s}a' = (1-2f_0^2(s))a - f_0^2(s)b\,, \\[5pt] -b''-\frac{1}{s}b'+\frac{4}{s^2}b =(1-2f_0^2(s))b - f_0^2(s)a \hbox{ for } s > 0\,. 
\end{array} \end{equation} By $a_{2, \epsilon}(\frac{1}{\epsilon})= b_{2,\epsilon} (\frac{1}{\epsilon})=0$ and (\ref{2.14}), we obtain that $\alpha_{2,0}$ and $\beta_{2,2}$ satisfy one of the following cases: \begin{description} \item{(a)} $\beta_{2,2} = \tilde{K}_2 \alpha_{2,0}$ and $\alpha_{2,0}$ is any real number, where $\tilde{K}_2$ is a constant independent of $\alpha_{2,0}$. \item{(b)} $\alpha_{2,0}=0$, $\beta_{2,2}$ is any real number. \item{(c)} Both $\alpha_{2,0}$ and $\beta_{2,2}$ are any real numbers. \end{description} For the case (a), we utilize (\ref{2.13}), (\ref{2.14}), $\lambda_2\to 0$ as $\epsilon\to 0+$ and the standard ordinary differential equation theorem. Then we obtain that \begin{equation}\label{2.21} \begin{array}{c} a_{2,\epsilon}(s)=\alpha_{2,0}(a_0(s)+\lambda_2\epsilon^2 \hat{a}_{2,\epsilon}(s)) +\alpha_{2,0}\tilde{K}_2(a_1(s)+\lambda_2\epsilon^2\breve{a}_{2,\epsilon} (s))\,, \\[5pt] b_{2,\epsilon}(s)=\alpha_{2,0}(b_0(s)+\lambda_2\epsilon^2 \hat{b}_{2,\epsilon}(s)) +\alpha_{2,0}\tilde{K}_2(b_1(s) +\lambda_2\epsilon^2\breve{b}_{2,\epsilon}(s))\,, \\ \end{array} \end{equation} where $\hat{a}_{2,\epsilon}(s)=O(s^2)$, $\breve{a}_{2,\epsilon}(s)= O(s^8)$, $\hat{b}_{2,\epsilon}(s)=O(s^6)$ and $\breve{b}_{2,\epsilon}(s)=O(s^4)$ as $s\to 0+$. Moreover, for any $M>0$ (independent of $\epsilon$), there exist $\epsilon_1=\epsilon_1(M)>0$ and $\kappa=\kappa(M)>0$ such that \begin{equation}\label{2.40} |\hat{a}_{j\,,\epsilon}(s)|, |\hat{b}_{j\,,\epsilon}(s)|, |\breve{a}_{j\,,\epsilon}(s)|, |\breve{b}_{j\,,\epsilon}(s)|\leq \kappa \end{equation} for $0\leq s\leq M,\quad 0<\epsilon\leq \epsilon_1, j=1, 2$. Now we set $\alpha_{1,0} =\alpha_{2,0} =1$. Then we have \begin{equation}\label{2.22} \begin{array}{c} w_{2,\epsilon}-w_{\epsilon}=\alpha_\epsilon(r)+\beta_\epsilon(r) e^{2i\theta}\,,\\ \alpha_\epsilon(r)=(\tilde{K}_2-\tilde{K}_1)a_1(\frac{r}{\epsilon}) +\epsilon^2[\lambda_2(\hat{a}_{2,\epsilon}+\tilde{K}_2 \breve{a}_{2,\epsilon})-\lambda_1(\hat{a}_{1,\epsilon} +\tilde{K}_1\breve{a}_{1,\epsilon})](\frac{r}{\epsilon})\,,\\ \beta_\epsilon(r)=(\tilde{K}_2-\tilde{K}_1)b_1(\frac{r}{\epsilon}) +\epsilon^2[\lambda_2(\hat{b}_{2,\epsilon}+\tilde{K}_2 \breve{b}_{2,\epsilon})-\lambda_1(\hat{b}_{1,\epsilon} +\tilde{K}_1\breve{b}_{1,\epsilon})](\frac{r}{\epsilon})\,.\\ \end{array} \end{equation} \noindent For (\ref{2.16}), we have the following result. \paragraph{Lemma I} {\it Let $(a_j\,,b_j), j= 2,\dots,5\,,$ be the fundamental solutions of (\ref{2.16}). Then $$ a_2(s)=\frac{f_0}{s}+f_0'\,, b_2(s)=f_0'-\frac{f_0}{s} \quad\hbox{for } s>0 $$ and the asymptotic behaviors of $(a_j\,,b_j), j=3,4,5$ are as follows. $$\displaylines{ a_3(s)= e^{-\sqrt{2} s} s^{-1/2} (\frac{1}{2}-\frac{5}{16\sqrt{2}s} +O(\frac{1}{s^2}))\,, \cr b_3(s)= e^{-\sqrt{2} s} s^{-1/2} (\frac{1}{2}-\frac{5}{16\sqrt{2}s} +O(\frac{1}{s^2}))\,, \cr a_4(s)= \frac{s}{2} + O(\frac{1}{s})\,, b_4(s)= -\frac{s}{2} + O(\frac{1}{s})\,, \cr a_5(s),\; b_5(s) \geq \frac{1}{8} s^2\quad\hbox{as } s \to +\infty\,. }$$ } We will prove Lemma~I in the appendix. Now we claim that \begin{equation}\label{2.20} \int_1^\infty \frac{1}{s} a_1^2(s)\,ds = +\infty\,. \end{equation} We prove this by contradiction. Suppose $$ \int_1^\infty \frac{1}{s} a_1^2(s)\,ds < +\infty\,. $$ Since $(a_1, b_1)$ is a solution of (\ref{2.16}), then by Lemma I, we have $$ (a_1, b_1)=\alpha(a_2, b_2)+\beta(a_3, b_3)\,, $$ where $\alpha, \beta \in {\mathbb R}$. 
Moreover, by $a_1(s)\sim s^6, b_1(s)\sim s^2, a_2(s)\sim 1$ and $b_2(s)\sim s^2$ as $s\to 0+$, then we obtain that $\beta\neq 0$ and $\beta(a_3, b_3)=(a_1, b_1)-\alpha(a_2, b_2)$ is bounded as $s\to 0+$. Hence $(a_3, b_3)$ is a nontrivial bounded solution of (\ref{2.16}). From Lemma~I, $(a_3, b_3)$ decays exponentially to zero as $s\to +\infty$. Thus $(a_3\,,b_3)$ is an eigenfunction in $L^2({\mathbb R}^2)\times L^2({\mathbb R}^2)$ of ${\it L}$ associated with the eigenvalue $0$, where ${\it L}$ is the linear operator defined by $$ {\it L}(a, b)=\left( \begin{array}{c} a''+\frac{1}{s}a'+(1-2f_0^2)a-f_0^2b \\ b''+\frac{1}{s}b'-\frac{4}{s^2}b+(1-2f_0^2)b-f_0^2a \end{array} \right)\,, $$ for $a=a(s)$, $b=b(s)$, $s > 0$. This is a contradiction with Proposition~5.4 in \cite{WX}. Therefore we obtain (\ref{2.20}). Now we use (\ref{2.20}) to complete the proof of Theorem I(ii). By (\ref{2.22}) and Theorem I(i) in \cite{L}, we have \begin{eqnarray*} \lefteqn{\frac{1}{2\pi} \langle L_\epsilon (w_{2,\epsilon}-w_{\epsilon})\,, w_{2,\epsilon} -w_{\epsilon}\rangle } \\ &=&\int_0^1 r[(\alpha_\epsilon')^2+(\beta_\epsilon')^2]+\frac{4}{r} \beta_\epsilon^2 -\frac{r}{\epsilon^2}(1-f_0^2(\frac{r}{\epsilon}))(\alpha_\epsilon^2 + \beta_\epsilon^2)\,dr \\ & &+ \int_0^1 \frac{r}{\epsilon^2}f_0^2(\frac{r}{\epsilon})(\alpha_\epsilon+\beta_\epsilon)^2\,dr \\ &=& \int_0^1 r (\alpha_\epsilon')^2 + \frac{1}{r} \alpha_\epsilon^2 -\frac{r}{\epsilon^2}(1-f_0^2(\frac{r}{\epsilon})) \alpha_\epsilon^2\,dr \\ & &+ \int_0^1 r (\beta_\epsilon')^2 + \frac{1}{r} \beta_\epsilon^2 -\frac{r}{\epsilon^2}(1-f_0^2(\frac{r}{\epsilon})) \beta_\epsilon^2\,dr \\ & &+ \int_0^1 \frac{3}{r} \beta_\epsilon^2 - \frac{1}{r} \alpha_\epsilon^2 + \frac{r}{\epsilon^2}f_0^2(\frac{r}{\epsilon})(\alpha_\epsilon + \beta_\epsilon)^2\,dr\\ &\geq& \frac{c}{2\pi} \| w_{2,\epsilon}-w_{\epsilon} \|_{L^2}^2 + \int_0^1 \frac{3}{r} \beta_\epsilon^2 - \frac{1}{r} \alpha_\epsilon^2 + \frac{r}{\epsilon^2}f_0^2(\frac{r}{\epsilon})(\alpha_\epsilon + \beta_\epsilon)^2\,dr \\ &=&\frac{c}{2\pi} (\| w_{2,\epsilon}\|_{L^2}^2 + \| w_{\epsilon}\|_{L^2}^2) \\ &&+ \int_0^{1/\epsilon} \frac{3}{s} \beta_\epsilon^2(\epsilon s) - \frac{1}{s} \alpha_\epsilon^2(\epsilon s) + sf_0^2(s)(\alpha_\epsilon + \beta_\epsilon)^2(\epsilon s)\,ds \,, \end{eqnarray*} where $c > 0$ is a constant independent of $\epsilon$. Then \begin{equation}\label{2.23} \begin{array}{l} \frac{1}{2\pi} \langle L_\epsilon (w_{2,\epsilon}-w_{\epsilon}), w_{2,\epsilon}-w_{\epsilon}\rangle \\ \geq \frac{c}{2\pi} (\| w_{2,\epsilon}\|_{L^2}^2 + \| w_{\epsilon} \|_{L^2}^2) + \int_0^{1/\epsilon} \frac{3}{s} \beta_\epsilon^2(\epsilon s) - \frac{1}{s} \alpha_\epsilon^2(\epsilon s) + sf_0^2(s)(\alpha_\epsilon + \beta_\epsilon)^2(\epsilon s)\,ds \,, \end{array} \end{equation} Setting $\beta_\epsilon=\tau\alpha_\epsilon$, then we obtain $$ \frac{3}{s}\beta_\epsilon^2-\frac{1}{s}\alpha_\epsilon^2+sf_0^2(s)(\alpha_\epsilon+\beta_\epsilon)^2 =[\frac{3}{s}\tau^2-\frac{1}{s}+sf_0^2(s)(1+\tau)^2]\alpha_\epsilon^2\,. $$ It is easy to check that $$ \frac{3}{s}\tau^2-\frac{1}{s}+sf_0^2(s)(1+\tau)^2 \geq H_0(s) \hbox{ for } \tau\in{\mathbb R}\,,s>0\,, $$ where $H_0(s)=\frac{2s^2f_0^2(s)-3}{s(3+s^2f_0^2(s))}$. Hence (\ref{2.23}) becomes \begin{eqnarray}\label{2.24} \lefteqn{ \frac{1}{2\pi} \langle L_\epsilon (w_{2,\epsilon}-w_{\epsilon})\,, w_{2,\epsilon}-w_{\epsilon}\rangle} \\ & \geq& \frac{c}{2\pi}(\| w_{\epsilon}\|_{L^2}^2+\| w_{2,\epsilon}\|_{L^2}^2) +\int_0^{1/\epsilon} H_0(s)\alpha_\epsilon^2(\epsilon s)\,ds\,. 
\nonumber \end{eqnarray} Note that (\ref{2.10}) implies that $H_0(s)\geq \frac{1}{s}$ as $s\to +\infty$. Thus by (\ref{2.20}), we may choose a large constant $M > 0$ independent of $\epsilon$ such that \begin{equation} \label{2.25} \int_0^M H_0(s) a_1^2(s)\,ds > 0 \quad\hbox{and}\quad H_0(s) > 0 \quad\hbox{for } s\geq M\,. \end{equation} Therefore by (\ref{2.24}), we have \begin{eqnarray} \lefteqn{\frac{1}{2\pi}\langle L_\epsilon (w_{2,\epsilon} -w_{\epsilon})\,,w_{2,\epsilon}-w_{\epsilon}\rangle}\nonumber \\ &\geq& c \epsilon^2 \int_0^{1/\epsilon} s\sum_{j=1}^2 (a_{j,\epsilon}^2+b_{j,\epsilon}^2)(s)\,ds + \int_0^{1/\epsilon} H_0(s)\alpha_\epsilon^2(\epsilon s)\,ds \nonumber\\ &=& \frac{c}{2} \epsilon^2 \int_0^M s\sum_{j=1}^2 (a_{j,\epsilon}^2+b_{j,\epsilon}^2)(s)\,ds \label{2.26} \\ & &+ \frac{c}{2} \epsilon^2 \int_0^M s\sum_{j=1}^2 (a_{j,\epsilon}^2+b_{j,\epsilon}^2)(s)\,ds + \int_0^M H_0(s)\alpha_\epsilon^2(\epsilon s)\,ds \nonumber\\ & &+ c \epsilon^2 \int_M^{1/\epsilon} s\sum_{j=1}^2 (a_{j,\epsilon}^2+b_{j,\epsilon}^2)(s)\,ds + \int_M^{1/\epsilon} H_0(s)\alpha_\epsilon^2(\epsilon s)\,ds\nonumber \end{eqnarray} Furthermore, by (\ref{2.22}), we have \begin{eqnarray} \lefteqn{ \int_0^M H_0(s)\alpha_\epsilon^2(\epsilon s)\,ds }\nonumber \\ &=& (\tilde{K}_2-\tilde{K}_1)^2 \int_0^M H_0(s)a_1^2(s)\,ds + 2(\tilde{K}_2-\tilde{K}_1)\epsilon^2 \int_0^M H_0(s)a_1(s) \nonumber \\ &&\times\Big[\lambda_2(\hat{a}_{2,\epsilon}+\tilde{K}_2 \breve{a}_{2,\epsilon})-\lambda_1(\hat{a}_{1,\epsilon} +\tilde{K}_1\breve{a}_{1,\epsilon})\Big](s)\,ds \label{2.27} \\ &&+ \epsilon^4 \int_0^M H_0(s) [\lambda_2(\hat{a}_{2,\epsilon}+\tilde{K}_2\breve{a}_{2,\epsilon})- \lambda_1(\hat{a}_{1,\epsilon}+\tilde{K}_1\breve{a}_{1,\epsilon})]^2(s) \,ds \nonumber \end{eqnarray} Hence by (\ref{2.15}), (\ref{2.21}), (\ref{2.40}), (\ref{2.25}), (\ref{2.27}) and $\alpha_{1,0}=\alpha_{2,0}=1$, $\lambda_1, \lambda_2\to 0$ as $\epsilon\to 0+$, we have \begin{equation}\label{2.28} \frac{c}{2} \epsilon^2 \int_0^M s\sum_{j=1}^2 (a_{j,\epsilon}^2+b_{j,\epsilon}^2)(s)\,ds + \int_0^M H_0(s)\alpha_\epsilon^2(\epsilon s)\,ds > 0 \end{equation} as $0 < \epsilon \leq \epsilon_1$, where $\epsilon_1 > 0$ is a small constant. Thus by (\ref{2.25}), (\ref{2.26}) and (\ref{2.28}), we obtain \begin{eqnarray*} \frac{1}{2\pi} \langle L_\epsilon (w_{2,\epsilon} - w_{\epsilon})\,, w_{2,\epsilon}- w_{\epsilon}\rangle &\geq& \frac{c}{2} \epsilon^2 \int_0^{1/\epsilon} s\sum_{j=1}^2 (a_{j,\epsilon}^2+b_{j,\epsilon}^2)(s)\,ds \\ &=& \frac{c}{4 \pi} (\| w_{\epsilon}\|_{L^2}^2+ \| w_{2,\epsilon}\|_{L^2}^2)\,, \end{eqnarray*} then \begin{equation} \label{2.29} \frac{1}{2\pi} \langle L_\epsilon (w_{2,\epsilon}- w_{\epsilon})\,, w_{2,\epsilon} - w_{\epsilon}\rangle \geq \frac{c}{4 \pi} (\| w_{\epsilon}\|_{L^2}^2+ \| w_{2,\epsilon}\|_{L^2}^2)\,. 
\end{equation} On the other hand, by the definition of $w_{\epsilon}$ and $w_{2,\epsilon}$, we have \begin{eqnarray*} \frac{1}{2\pi} \langle L_\epsilon (w_{2,\epsilon}-w_{\epsilon})\,, w_{2,\epsilon}- w_{\epsilon}\rangle &=& \frac{1}{2\pi} \langle\lambda_2 w_{2,\epsilon} -\lambda_1 w_{\epsilon}\,,w_{2,\epsilon}-w_{\epsilon}\rangle \\ &=& \frac{1}{2\pi}(\lambda_1 \| w_{\epsilon}\|_{L^2}^2+\lambda_2 \| w_{2,\epsilon}\|_{L^2}^2) \\ &\leq& \frac{1}{2\pi}\lambda_2 (\| w_{\epsilon}\|_{L^2}^2+\| w_{2,\epsilon}\|_{L^2}^2) \end{eqnarray*} then \begin{equation}\label{2.30} \frac{1}{2\pi} \langle L_\epsilon (w_{2,\epsilon}-w_{\epsilon})\,,w_{2,\epsilon} -w_{\epsilon}\rangle \leq \frac{1}{2\pi} \lambda_2 (\| w_{\epsilon}\|_{L^2}^2+\| w_{2,\epsilon}\|_{L^2}^2) \,. \end{equation} Therefore (\ref{2.29}) and (\ref{2.30}) imply $$ \lambda_2 \geq \frac{c}{2} \equiv c_1 > 0 \quad\hbox{for } 0 < \epsilon \leq \epsilon_1\,. $$ For cases (b) and (c), we set $\alpha_{2,0}=0$, $\beta_{2,2}=1$ in (\ref{2.14}). Then we obtain that \begin{equation}\label{6.2} \begin{array}{c} w_{2,\epsilon}= a_{2,\epsilon}(r)+b_{2,\epsilon}(r) e^{2i \theta}\,, \\ a_{2,\epsilon}= a_1(\frac{r}{\epsilon})+ \epsilon^2\lambda_2 \breve{a}_{2,\epsilon}(\frac{r}{\epsilon})\,, \\ b_{2,\epsilon}= b_1(\frac{r}{\epsilon})+ \epsilon^2\lambda_2 \breve{b}_{2,\epsilon}(\frac{r}{\epsilon})\,. \end{array} \end{equation} Note that (\ref{6.2}) has the similar form to (\ref{2.22}). Hence we may apply the same argument as (\ref{2.29}) to derive that $$ \frac{1}{2\pi}\langle L_\epsilon w_{2,\epsilon}\,, w_{2,\epsilon}\rangle \geq \frac{c}{4 \pi} \| w_{2,\epsilon}\|_{L^2}^2\,. $$ i.e. $\lambda_2 \geq \frac{c}{2} \equiv c_1 > 0$. Therefore we complete the proof of Theorem~I(ii). For the proof of Theorem~I(iii), we define the approximate eigenfunction $\tilde{U}_\epsilon(s,\theta)$ as follows. $$\displaylines{ \tilde{U}_\epsilon(s,\theta) = a(s) + b(s) e^{2 i \theta} \,,\cr a(s) = \left\{\begin{array}{ll} \frac{1}{s}f_0(s)+f'_0(s) &\hbox{ if } 00$ is a large constant independent of $\epsilon$, and $B_\epsilon, D_\epsilon$ are defined by \begin{equation} \begin{array}{c} B_\epsilon (1-\epsilon R_\epsilon)^3 = \frac{1}{R_\epsilon} f_0(R_\epsilon)+f_0'(R_\epsilon)\,, \\[5pt] D_\epsilon (1-\epsilon R_\epsilon)^3 = f_0'(R_\epsilon) -\frac{1}{R_\epsilon}f_0(R_\epsilon)\,. \end{array} \end{equation} Let $$ U_\epsilon(s\,,\theta) = C_\epsilon^{-1} \tilde{U}_\epsilon(s\,,\theta)\,, \quad C_\epsilon = \| \tilde{U}_\epsilon \|_{L^2}\,. $$ Then it is easy to check that $U_\epsilon\in H_0^1(B_{1/\epsilon})$ and $\tilde{L}_\epsilon U_\epsilon = 0$ for $0 0$ is a constant independent of $\epsilon$. Thus by (\ref{2.32}) and (\ref{2.35}), we have \begin{equation}\label{2.36} \| \nu_\epsilon\|_{L^2} = O((\log\frac{1}{\epsilon})^{-1/2})\,. \end{equation} >From (\ref{2.34}) and the definition of $U_\epsilon$, we obtain $$ \| U_\epsilon\|_{L^2}^2 = \gamma_\epsilon^2 + \| \nu_\epsilon\|_{L^2}^2 = 1\,. $$ Therefore by (\ref{2.36}), we have $\gamma_\epsilon^2 = 1 + O((\log\frac{1}{\epsilon})^{-1})$. Since $\langle\tilde{w}_1, \tilde{e}_1\rangle > 0$ and by (\ref{2.34}), then $\gamma_\epsilon > 0$. Hence \begin{equation}\label{2.37} \gamma_\epsilon = (1 + O((\log\frac{1}{\epsilon})^{-1}))^{1/2}\,. 
\end{equation} By (\ref{2.33}) and (\ref{2.37}), we obtain \begin{equation}\label{2.38} \| \frac{1}{\gamma_\epsilon} U_\epsilon - \tilde{w}_1\|_{L^2} = O((\log\frac{1}{\epsilon})^{-1/2}) \end{equation} From (\ref{2.34}) and (\ref{2.37}), we have $$ \tilde{e}_1=\frac{1}{\gamma_\epsilon} (U_\epsilon-\nu_\epsilon)=\tilde{w}_1+\nu_{1,\epsilon}\,, $$ where $\nu_{1\,,\epsilon}= \frac{1}{\gamma_\epsilon}(U_\epsilon-\nu_\epsilon) -\tilde{w}_1$. By (\ref{2.36}), (\ref{2.37}) and (\ref{2.38}), we obtain that $$ \| \nu_{1\,,\epsilon}\|_{L^2}= O((\log\frac{1}{\epsilon})^{-1/2}) \,. $$ Similarly, we have $$ \tilde{e}_{2} = \tilde{w}_2 + \nu_{2\,,\epsilon}\,, \| \nu_{2\,,\epsilon}\|_{L^2}=O((\log\frac{1}{\epsilon})^{-1/2}) \,. $$ Therefore we complete the proof of Proposition~I. \section{Vortex dynamics in nonlinear heat equations} \setcounter{equation}{0} In this Section, we consider the system of nonlinear heat equations \begin{equation} \label{1.5} \begin{array}{c} u_t= \Delta u + \frac{1}{\epsilon^2} u (1- |u|^2) \quad\hbox{ for } x \in {\mathbb R}^2,\; t > 0 , \\ u|_{t=0}= u_0(x) \quad\hbox{ for } x \in {\mathbb R}^2. \end{array} \end{equation} \noindent The equation (\ref{1.5}) is the simplified Ginzburg-Landau equation which is from the Ginzburg-Landau theory of superconductivity. As $\epsilon$ is small, the Ginzburg-Landau theory predicted the existence of vortex state. The vortex state consists of many normal filaments embedded in a superconducting matrix. Each of these filaments carries with it a quantized amount of magnetic flux, and is circled by a vortex of superconducting current. The vortex structures are set in motion by a variety of mechanism, including thermal fluctuations and applied voltages and currents. Unfortunately, such vortex motion in an applied magnetic field induces an effective resistance in the material, and thus a loss of superconductivity. Therefore, it is crucial to understand the dynamics of these vortices in order to pin vortices at a fixed location, i.e. prevent their motion. Hereafter, vortices are so called point vortices which come from the cross section of vortex filaments. For the dynamics of vortices, E~\cite{E} used the formal asymptotic analysis to derive the dynamic laws of vortices. In this Section, we will give a more general proof of the dynamics of vortices. The idea of the proof is to use the generalized asymptotic expansion (\ref{1.9}), Theorem~I and Proposition~I. Assume that $x =q_\epsilon(t)\in{\mathbb R}^2$ is the vortex trajectory, $q_\epsilon$ is smooth in $t$, $q_\epsilon(0)= 0$, and $B_1(q_\epsilon(t))$ is the vortex core which moves along with the vortex trajectory $x = q_\epsilon(t)$. Here we assume that the solution $u$ has only one vortex center at $q_\epsilon(t)$. Now we focus on the vortex core $B_1(q_\epsilon(t))$ and consider the following system of equations \begin{equation} \label{1.6} \begin{array}{c} u_t = \Delta u + \frac{1}{\epsilon^2} u (1- |u|^2) \quad\hbox{ for } x \in B_1 (q_\epsilon(t)), t > 0 , \\ u|_{t=0} = u_0(x) \quad\hbox{ for } x \in B_1 (0)\,. \end{array} \end{equation} \noindent Assume that \begin{equation} \label{1.7} X = \frac{x-q_\epsilon(t)}{\epsilon}\,,\quad \Psi(X\,,t\,,\epsilon) = u(x\,,t\,,\epsilon) \end{equation} \noindent for $x\in B_1(q_\epsilon(t))\,,i.e. 
X\in B_{1/\epsilon}(0)\,, t\geq 0\,.$ Then we have \begin{equation} \label{1.8} \begin{array}{c} \epsilon^2 \Psi_t = \epsilon \dot{q}_\epsilon \cdot \nabla_X \Psi + \Delta_X \Psi+ \Psi (1- |\Psi|^2) \quad \hbox{ for } X \in B_{1/\epsilon} (0), t > 0 , \\ \Psi|_{t=0} = u_0 \quad\hbox{ for } X \in B_{1/\epsilon} (0)\,, \end{array} \end{equation} where $\Psi_t=\frac{\partial}{\partial t}\Psi(\cdot,t,\epsilon)$. We take an expansion form of $\Psi$ as follows. \begin{equation} \label{1.9} \Psi(X, t, \epsilon) = \Psi_0(X) e^{i H} + \epsilon \Psi_1(X, t, \epsilon) e^{i H}\,, \end{equation} where $\Psi_0(X) = f_0(s) e^{i \phi}$, $s = |X|$, $\phi = \arg(X)$. Note that (\ref{1.10}) is a more general form than the inner expansion form in \cite{N} and \cite{E}. Here we assume that $H=H(x,t,\epsilon)$ is a smooth real-valued function and satisfies \begin{equation} \label{1.10} \Delta_x H=0\,, |\nabla_x H|\,,|H_t|\,,|\nabla_x H_t|\leq K \hbox{ in } B_1(q_\epsilon(t))\,, \end{equation} where $H_t = \frac{\partial}{\partial t} H$ and $K > 0$ is a constant. We will derive the governing equation of the vortex trajectory $x =q_\epsilon(t)$ as follows. \begin{equation} \label{1.11} \begin{array}{lll} &\dot{q}_\epsilon &= \frac{1}{\log\frac{1}{\epsilon}+O(1)} [2J\nabla_x H(q_\epsilon\,,t\,,\epsilon) -c_\epsilon J\nabla_x H_t(q_\epsilon\,,t\,,\epsilon) + o_\epsilon(1)] \\ & &= \frac{1}{\log\frac{1}{\epsilon}+O(1)} [-2\nabla_x \tilde{H}(q_\epsilon\,,t\,,\epsilon) +c_\epsilon \nabla_x \tilde{H}_t(q_\epsilon\,,t\,,\epsilon) + o_\epsilon(1)]\,, \end{array} \end{equation} where $c_\epsilon=\epsilon^2\int_0^{1/\epsilon} sf_0^2(s)\,ds$, $J= \Big(\begin{array}{cc}0&1 \\ -1&0\end{array}\Big)$, and $H, \tilde{H}$ are harmonic conjugate. Here we denote $o_\epsilon(1)$ as a small quantity, independent of time $t$, and tending to zero as $\epsilon\to 0+$. >From (\ref{1.11}), it is remarkable that the velocity $\dot{q}_\epsilon$ of the vortex trajectory $x= q_\epsilon(t)$ can be nonzero and of order $O((\log \frac{1}{\epsilon})^{-1})$ if the function $H$ is nonconstant. We require that $\Psi_1$ satisfies the "small" perturbation condition as follows. \begin{equation} \label{3.1} \begin{array}{c} \|\nabla_X \Psi_1\|_{L^2(B_\frac{1}{\epsilon})} \leq K\epsilon^{-\beta}\,, \quad 0<\beta<1\,, \\ \| \Psi_1\|_{L^6(B_\frac{1}{\epsilon})} \leq K\epsilon^{-\gamma}\,, \quad 0<\gamma<\frac{1}{3}\,, \\ \| \Psi_{1\,,t}\|_{L^2(B_\frac{1}{\epsilon})} \leq K\epsilon^{-\delta}\,, \quad 0<\delta<2\,, \end{array} \end{equation} \begin{equation} \label{3.2} \begin{array}{c} |\langle \Psi_1\,,\tilde{e}_{j}\rangle | = o_\epsilon(\epsilon^{-2}(\log\frac{1}{\epsilon})^{1/2})\,,\; j= 1, 2\,, \\ \sum_{k=2}^\infty\sum_{j=1}^{J(k)} \epsilon^2\lambda_{k} | \langle \Psi_1\,,\tilde{e}_{j\,,k}\rangle | = o_\epsilon(1)\,, \end{array} \end{equation} \begin{equation}\label{3.20} \begin{array}{c} | \int_{\partial B_{1/\epsilon}} \Psi_1 \cdot \partial_{\vec{n}} \tilde{e}_{j} | = o_\epsilon((\log\frac{1}{\epsilon})^{-1/2})\,,\; j=1, 2\,, \\ \sum_{k=2}^\infty\sum_{j=1}^{J(k)} |\int_{\partial B_{1/\epsilon}} \Psi_1 \cdot \partial_{\vec{n}} \tilde{e}_{j\,,k} | = o_\epsilon(1)\,, \end{array} \end{equation} where $J(k)$ is the multiplicity of the eigenvalue $\lambda_k$, $K > 0$ is a constant and $\partial_{\vec{n}}$ is the normal derivative. 
Here $\tilde{e}_{j}$'s and $\tilde{e}_{j\,,k}$'s are the eigenfunctions in $\tilde{V}_1=\{a(s)+b(s) e^{2i \phi}\in H_0^1(B_{1/\epsilon} ;{\mathbb C})\}$ of $\tilde{L}_\epsilon$ corresponding to the eigenvalues $\epsilon^2\lambda_1$ and $\epsilon^2\lambda_{k}$'s respectively. We require them to have unit norm in $L^2(B_{1/\epsilon})$. In addition $\langle\cdot\,,\cdot\rangle$ is the inner product in $L^2$. Note that the upper bound of (\ref{3.1}) and the first term of (\ref{3.2}) tend to infinity as $\epsilon$ goes to zero. >From \cite{H1}, the equation (\ref{1.6}) is well posed. Then $\Psi_1$ is smooth in both space and time variables. Hence (\ref{3.1}), (\ref{3.2}) and (\ref{3.20}) can be fulfilled in a short time when $\Psi_1|_{t=0}$ satisfies the following assumption. \medskip \noindent {\bf Assumption~I:} {\it $\Psi_1=\Psi_1(X, t,\epsilon)$ satisfies the following: \begin{description} \item{(1)} $\Psi_1(\cdot, 0, \epsilon)$ has sufficiently small $C^1$ norm on the vortex core $\{X : |X|\leq\frac{2}{\epsilon} \}$ at $t=0$ \item{(2)} $\|\partial_t \Psi_1(\cdot, 0, \epsilon)\|_{L^2 (B_{\frac{2}{\epsilon}})} = O(\epsilon^{-\delta})$, $0<\delta<2$. \end{description} } By the suitable choice of initial data $u_0$, we obtain Assumption~I(1). Assumption~I(2) preserves the vortex structure on the vortex core when the associated vortex point begins to move at the time $t=0$. Note that the upper bound of Assumption~I(2) is $\sim \epsilon^{-\delta}$, with $0<\delta<2$ which tends to infinity as $\epsilon$ goes to zero. We observe that Assumption~I is a local perturbation condition on the vortex core which is different from the global assumption in \cite{L1} and \cite{LX}. In Neu~\cite{N} and E~\cite{E}'s works, they used a specific asymptotic expansion formula and a pointwise matching condition to derive the dynamic laws of vortices. Basically, they require some pointwise matching condition on the boundary of vortex cores to derive the dynamic laws of vortices. However, (\ref{3.1}), (\ref{3.2}) and (\ref{3.20}) are not pointwise. This is a kind of generalization for the results of Neu and E. Now we prove (\ref{1.11}) as follows. By (\ref{1.9}) and (\ref{1.10}), (\ref{1.8}) becomes \begin{eqnarray} \label{3.3} \lefteqn{ \dot{q}_\epsilon\cdot(\nabla_{X}\Psi_0+\epsilon\nabla_{X}\Psi_1) - i\epsilon\Psi_0 H_t - i\epsilon^2\Psi_1 H_t - \epsilon^2 \Psi_{1, t} } \\ &=& \epsilon\Psi_0 |\nabla_x H|^2 - 2i(\nabla_{X}\Psi_0 \cdot \nabla_x H) + \tilde{L_\epsilon} \Psi_1 + \hat{N}_\epsilon (\Psi_1) \nonumber \end{eqnarray} for $X \in B_{1/\epsilon} (0)$ and $t > 0$, where \begin{equation}\label{6.1} \begin{array}{lcl} -\tilde{L_\epsilon}\Psi_1&=&\Delta_X \Psi_1 + (1 - |\Psi_0|^2)\Psi_1 - 2(\Psi_0 \cdot \Psi_1)\Psi_0\,,\\ \hat{N}_\epsilon (\Psi_1)&=&\epsilon^2\Psi_1|\nabla_x H|^2 - 2i\epsilon(\nabla_{X}\Psi_1 \cdot \nabla_x H) + \epsilon^2 |\Psi_1|^2 \Psi_1 \\ & &+\epsilon[ 2(\Psi_0\cdot\Psi_1)\Psi_1+|\Psi_1|^2\Psi_0 ]\,, \end{array} \end{equation} and $\Psi_{1, t} = \frac{\partial}{\partial t} \Psi_1(\cdot,t,\epsilon)$. Assume that \begin{equation} \label{3.4} \tilde{w}_j = \frac{1}{\Gamma_{\epsilon j}} \partial_{X_j}\Psi_0\,, \quad \Gamma_{\epsilon j} = \| \partial_{X_j}\Psi_0 \|_{L^2} = (\pi \log\frac{1}{\epsilon} + O(1))^{1/2}\,,\quad j=1,2\,. \end{equation} Hereafter, we use $X_j$ to denote the components of $X= (X_1,X_2)$. 
Then (\ref{3.3}) implies \begin{eqnarray} \lefteqn{ \dot{q}_{\epsilon, 1}(\partial_{X_1} \Psi_0 + \epsilon \partial_{X_1} \Psi_1) + \dot{q}_{\epsilon, 2} (\partial_{X_2} \Psi_0 + \epsilon \partial_{X_2} \Psi_1) -i \epsilon \Psi_0 H_t - i \epsilon^2 \Psi_1 H_t - \epsilon^2 \Psi_{1, t} }\nonumber \\ &=& \epsilon \Psi_0 |\nabla_x H|^2 - 2i(\partial_{X_1} \Psi_0 \partial_{x_1} H + \partial_{X_2} \Psi_0 \partial_{x_2} H) + \tilde{L_\epsilon} \Psi_1 + \hat{N}_\epsilon(\Psi_1) \hspace{1cm}\label{3.5} \end{eqnarray} for $X \in B_{1/\epsilon}(0)$ and $t> 0$. Making the inner product with (\ref{3.5}) and $\tilde{w}_j$, $j = 1, 2$, we have \begin{equation} \label{3.6} \begin{array}{ll} &\dot{q}_{\epsilon, 1}[\langle \partial_{X_1} \Psi_0, \tilde{w}_j\rangle +\epsilon\langle \partial_{X_1} \Psi_1, \tilde{w}_j\rangle ] + \dot{q}_{\epsilon, 2}[\langle \partial_{X_2} \Psi_0, \tilde{w}_j\rangle +\epsilon\langle \partial_{X_2} \Psi_1, \tilde{w}_j\rangle ] \\ &= \epsilon^2(\langle \Psi_{1, t}\,,\tilde{w}_j\rangle +\langle i\Psi_1 H_t\,,\tilde{w}_j\rangle ) + \langle \hat{N}_\epsilon(\Psi_1), \tilde{w}_j\rangle + \Gamma_j + \langle \tilde{L}_\epsilon \Psi_1, \tilde{w}_j\rangle \,, \end{array} \end{equation} for $j = 1, 2$, where $\langle\cdot, \cdot\rangle$ is the inner product in $L^2 ( B_{1/\epsilon} (0);{\mathbb C} )$, and \begin{equation} \label{3.7} \begin{array}{lll} \Gamma_j&=&-2\langle i\partial_{X_1} \Psi_0 \partial_{x_1} H\,,\tilde{w}_j\rangle - 2\langle i\partial_{X_2} \Psi_0 \partial_{x_2} H\,,\tilde{w}_j\rangle \\ & &+ \epsilon\langle \Psi_0|\nabla_x H|^2\,,\tilde{w}_j\rangle + \epsilon\langle i\Psi_0 H_t\,,\tilde{w}_j\rangle \,. \end{array} \end{equation} Since the eigenfunctions $\tilde{e}_{j}, \tilde{e}_{j\,,k}$'s dense in $\Lambda_1=\{w(X)= a(s)+b(s) e^{2i \phi}\in L^2(B_{1/\epsilon}(0) ; {\mathbb C})\}$ and $\tilde{w}_l\in \Lambda_1, l=1,2$, then \begin{equation}\label{3.21} \tilde{w}_l= \sum_{j=1}^2 \langle \tilde{w}_l\,,\tilde{e}_j\rangle \tilde{e}_j + \sum_{k=1}^\infty \sum_{j=1}^{J(k)} \langle \tilde{w}_l\,, \tilde{e}_{j\,,k}\rangle \tilde{e}_{j\,,k}\,. \end{equation} Using integration by parts, we have \begin{equation}\label{3.22} \begin{array}{lll} \langle \tilde{L}_\epsilon \Psi_1\,,\tilde{e}_{j}\rangle &= \int_{\partial B_{1/\epsilon}} \Psi_1 \cdot \partial_{\vec{n}} \tilde{e}_{j} + \epsilon^2 \lambda_1 \langle \Psi_1\,,\tilde{e}_{j}\rangle \,,\\ \langle \tilde{L}_\epsilon \Psi_1\,,\tilde{e}_{j\,,k}\rangle &= \int_{\partial B_{1/\epsilon}} \Psi_1 \cdot \partial_{\vec{n}} \tilde{e}_{j\,,k} + \epsilon^2 \lambda_{k} \langle \Psi_1\,,\tilde{e}_{j\,,k}\rangle \,. \end{array} \end{equation} Hence by Proposition I, (\ref{3.2}), (\ref{3.20}), (\ref{3.21}) and (\ref{3.22}), we obtain \begin{equation} \label{3.8} \begin{array}{lll} \langle \tilde{L}_\epsilon \Psi_1\,, \tilde{w}_l\rangle &= \sum_{j=1}^2 (\epsilon^2 \lambda_1 \langle \Psi_1\,, \tilde{e}_{j}\rangle + \int_{\partial B_{1/\epsilon}} \Psi_1 \cdot \partial_{\vec{n}} \tilde{e}_{j}) \langle \tilde{e}_{j}\,,\tilde{w}_l\rangle \\ &~~+ \sum_{k=2}^\infty\sum_{j=1}^{J(k)} (\epsilon^2\lambda_{k} \langle \Psi_1\,,\tilde{e}_{j\,,k}\rangle + \int_{\partial B_{1/\epsilon}} \Psi_1 \cdot \partial_{\vec{n}} \tilde{e}_{j\,,k}) \langle \tilde{e}_{j\,,k}\,,\tilde{w}_l\rangle \\ &= o_\epsilon((\log\frac{1}{\epsilon})^{-1/2})\,,\quad l=1, 2\,. 
\end{array} \end{equation} It is easy to check that \begin{equation} \label{3.9} \begin{array}{lcl} \partial_{X_1} \Psi_0 &=& \frac{1}{2}(\frac{1}{s}f_0 +f_0') + \frac{1}{2}(f_0' -\frac{1}{s}f_0) e^{2i \phi}\,,\\[5pt] \partial_{X_2} \Psi_0 &=& \frac{i}{2}(\frac{1}{s}f_0 +f_0') + \frac{i}{2}(\frac{1}{s}f_0 -f_0') e^{2i \phi}\,. \end{array} \end{equation} Hence $\langle\partial_{X_1} \Psi_0, \tilde{w}_2\rangle = \langle \partial_{X_2} \Psi_0, \tilde{w}_1\rangle = 0\,.$ Thus (\ref{3.6}) and (\ref{3.8}) imply that \begin{equation} \label{3.10} \begin{array}{lcl} \dot{q}_{\epsilon, 1}(\alpha_*+\epsilon\alpha_1) + \epsilon\dot{q}_{\epsilon, 2}\beta_1 &=& \gamma_1\,,\\ \epsilon\dot{q}_{\epsilon, 1}\beta_2 + \dot{q}_{\epsilon, 2}(\alpha_*+\epsilon\alpha_2) &=& \gamma_2\,,\\ \end{array} \end{equation} where $$ \begin{array}{c} \alpha_* = \Gamma_{\epsilon j} = \| \partial_{X_j} \Psi_0\|_{L^2}\,,\quad \alpha_j = \langle \partial_{X_j}\Psi_1, \tilde{w}_j\rangle \,, \\ \beta_1 =\langle\partial_{X_2}\Psi_1, \tilde{w}_1\rangle , \quad \beta_2 =\langle\partial_{X_1}\Psi_1, \tilde{w}_2\rangle , \quad \eta_{j} =\langle\hat{N}_\epsilon(\Psi_1)\,,\tilde{w}_j\rangle \,, \end{array} $$ and $$ \gamma_j= \Gamma_j+\epsilon^2(\langle\Psi_{1,t}\,,\tilde{w}_j\rangle +\langle i \Psi_1 H_t\,,\tilde{w}_j\rangle )+\eta_j +o_\epsilon((\log\frac{1}{\epsilon})^{-1/2})\,, $$ for $j = 1, 2$. By (\ref{1.10}), (\ref{3.1}) and H\"{o}lder inequality, we have \begin{equation} \label{3.11} \begin{array}{c} \epsilon |\alpha_j| \leq K \epsilon^{1-\beta}\,,\quad \epsilon |\beta_j| \leq K \epsilon^{1-\beta}\,, \quad 0<\beta<1\,, \\ |\eta_j| = o_\epsilon((\log \frac{1}{\epsilon})^{-1/2})\,, \quad \gamma_j = \Gamma_j + o_\epsilon((\log \frac{1}{\epsilon})^{-1/2})\,, \quad j=1, 2\,. \end{array} \end{equation} Moreover, (\ref{3.10}) implies \begin{equation} \label{3.12} \dot{q}_{\epsilon, 1}=\frac{1}{\gamma}[ (\alpha_*+\epsilon\alpha_2)\gamma_1 - \epsilon\beta_1\gamma_2 ]\,,\quad \dot{q}_{\epsilon, 2}=\frac{1}{\gamma}[ (\alpha_*+\epsilon\alpha_1)\gamma_2 - \epsilon\beta_2\gamma_1 ]\,, \end{equation} where $\gamma = \alpha_*^2+\epsilon\alpha_*(\alpha_1+\alpha_2)+\epsilon^2(\alpha_1\alpha_2-\beta_1\beta_2)$. Furthermore by (\ref{1.10}), (\ref{3.9}), and the mean-value theorem of harmonic functions, we have \begin{equation} \label{3.13} \begin{array}{c} \langle i \partial_{X_j}\Psi_0 \partial_{x_j}H\,,\tilde{w}_j \rangle= 0\,, \quad j=1, 2\,, \\ \langle i \partial_{X_1}\Psi_0 \partial_{x_1}H\,,\tilde{w}_2 \rangle = \frac{\pi}{\Gamma_{\epsilon 2}} \partial_{x_1}H(q_\epsilon\,,t\,, \epsilon) f_0^2(\frac{1}{\epsilon})\,, \\ \langle i \partial_{X_2}\Psi_0 \partial_{x_2}H\,,\tilde{w}_1 \rangle = -\frac{\pi}{\Gamma_{\epsilon 1}} \partial_{x_2}H(q_\epsilon,t,\epsilon) f_0^2(\frac{1}{\epsilon})\,, \\ |\epsilon \langle \Psi_0|\nabla_x H|^2\,,\tilde{w}_j \rangle | \leq \frac{\epsilon C}{\Gamma_{\epsilon j}}\,, \quad C = 2\pi K^2 \sup_{0<\epsilon<1} \int_0^{1/\epsilon} s f_0 f_0'\,ds > 0\,, \\ \epsilon \langle i \Psi_0 H_t\,,\tilde{w}_1 \rangle = -\frac{\pi}{\Gamma_{\epsilon 1}} (\epsilon^2 \int_0^{1/\epsilon} s f_0^2\,ds) \partial_{x_2} H_t(q_\epsilon\,,t\,,\epsilon)\,, \\ \epsilon \langle i \Psi_0 H_t\,,\tilde{w}_2 \rangle = \frac{\pi}{\Gamma_{\epsilon 2}} (\epsilon^2 \int_0^{1/\epsilon} s f_0^2\,ds) \partial_{x_1} H_t(q_\epsilon\,,t\,,\epsilon)\,. 
\end{array} \end{equation} Hence by (\ref{1.3}), (\ref{1.4}), (\ref{3.7}), and (\ref{3.13}), we obtain \begin{equation}\label{3.14} \begin{array}{rcl} \Gamma_1&=& \frac{\pi}{\Gamma_{\epsilon 1}} (2\partial_{x_2} H(q_\epsilon\,,t\,,\epsilon) - c_\epsilon\partial_{x_2} H_t(q_\epsilon\,,t\,,\epsilon)) + O(\epsilon (\log\frac{1}{\epsilon}) ^{-1/2})\,, \\ \Gamma_2&=& -\frac{\pi}{\Gamma_{\epsilon 2}} (2\partial_{x_1} H(q_\epsilon\,,t\,,\epsilon) - c_\epsilon\partial_{x_1} H_t(q_\epsilon\,,t\,,\epsilon)) + O(\epsilon (\log\frac{1}{\epsilon}) ^{-1/2})\,, \end{array} \end{equation} where $c_\epsilon = \epsilon^2 \int_0^{1/\epsilon} s f_0^2\,ds > 0$. Note that by (\ref{1.3}) and (\ref{1.4}), $c_\epsilon \sim 1/2$ as $\epsilon\to 0+\,.$ Thus by (\ref{3.4}), (\ref{3.11}), (\ref{3.12}), and (\ref{3.14}), we complete the proof of (\ref{1.11}). For the motion of $d$-vortices, we restrict (\ref{1.5}) on the vortex cores $B_1(q_{j \epsilon})$'s and we consider the following system of equations. $$ \begin{array}{c} u_t= \Delta u + \frac{1}{\epsilon^2} u (1- |u|^2) \quad\hbox{for } x \in B_1 (q_{j \epsilon}(t)),\; t > 0 , \\ u|_{t=0}= u_{j 0}(x) \quad\hbox{ for } x \in B_1 (q_{j \epsilon}(0))\,, \end{array} $$ where $x=q_{j \epsilon}(t)$ is the $j$-th vortex trajectory, $q_{j \epsilon}$ is smooth in $t$, $|q_{j \epsilon} - q_{k \epsilon}| > 2$ for $j \neq k$, and $B_1(q_{j \epsilon}(t))$ is the $j$-th vortex core which moves along with the $j$-th vortex trajectory $x=q_{j \epsilon}(t), j=1,\dots,d$. Now we assume that \begin{equation} \label{1.12} X^j=\frac{x-q_{j \epsilon}(t)}{\epsilon}\,,\quad \Psi(X^j,t,\epsilon)=u(x,t,\epsilon) \end{equation} for $x\in B_1(q_{j \epsilon}(t))$ i.e. $X^j\in B_{1/\epsilon}(0)\,,j=1,\dots,d\,.$ Like (\ref{1.9}), we take a similar expansion form of $\Psi$ on each vortex core as follows. \begin{equation} \label{1.13} \Psi(X^j,t,\epsilon) =\Psi_0(X^j) e^{i H_j} + \epsilon \Psi_1(X^j,t,\epsilon) e^{i H_j} \hbox{ for } X^j\in B_{1/\epsilon}(0)\,, \\ \end{equation} where $$ \Psi_0(X^j) = f_0(s_j) e^{i \phi_j}\,, s_j= |X^j|, \phi_j= \arg X^j\,. $$ Here we assume that \begin{equation} \label{1.14} \begin{array}{c} H_j=\sum_{k\neq j} \phi_k + H\,,\Delta_x H=0 \quad \hbox{ in } B_1(q_{j \epsilon}(t))\,, \\ |\nabla_x H|\,, |H_t|\,, |\nabla_x H_t|\leq K \quad \hbox{ in } B_1(q_{j \epsilon}(t))\,. \end{array} \end{equation} By the same argument as (\ref{1.11}), we obtain the equations of $q_{j \epsilon}$ as follows. \begin{equation} \label{1.15} \begin{array}{lcl} \dot{q}_{j \epsilon} &=& \frac{1}{\log\frac{1}{\epsilon}+O(1)} [2J \nabla_x H_j(q_{j \epsilon}\,,t\,,\epsilon) -c_\epsilon J \nabla_x\partial_t H_j(q_{j \epsilon}\,,t\,,\epsilon)+ o_\epsilon(1)]\,,\\ &=& \frac{1}{\log\frac{1}{\epsilon}+O(1)} [-2 \nabla_x \tilde{H}_j(q_{j \epsilon}\,,t\,,\epsilon) +c_\epsilon \nabla_x\partial_t \tilde{H}_j(q_{j \epsilon}\,,t\,,\epsilon)+ o_\epsilon(1)]\,, \end{array} \end{equation} where $H_j, \tilde{H}_j's$ are harmonic conjugates. Here we require that $\Psi_1$ satisfies the "small" perturbation conditions on each vortex core $|X^j|\leq \frac{1}{\epsilon}$, $j= 1,\dots, d$ as follows. 
\begin{equation} \label{3.15} \begin{array}{c} \|\nabla_{X^j} \Psi_1\|_{L^2(B_\frac{1}{\epsilon})} \leq K \epsilon^{-\beta}\,,\quad 0<\beta<1\,, \\ \| \Psi_1\|_{L^6(B_\frac{1}{\epsilon})} \leq K \epsilon^{-\gamma}\,, \quad 0<\gamma<\frac{1}{3}\,, \\ \| \Psi_{1\,,t}\|_{L^2(B_\frac{1}{\epsilon})} \leq K \epsilon^{-\delta}\,,\quad 0<\delta<2\,, \\ \end{array} \end{equation} \begin{equation} \label{3.16} \begin{array}{c} |\langle\Psi_1\,,\tilde{e}_{m}\rangle| = o_\epsilon(\epsilon^{-2}(\log\frac{1}{\epsilon})^{1/2})\,,\quad m= 1, 2\,, \\[5pt] \sum_{k=2}^\infty\sum_{j=1}^{J(k)} \epsilon^2\lambda_{k} |\langle \Psi_1\,,\tilde{e}_{m\,,k}\rangle | = o_\epsilon(1)\,, \end{array} \end{equation} \begin{equation}\label{3.17} \begin{array}{c} |\int_{\partial B_{1/\epsilon}} \Psi_1 \cdot \partial_{\vec{n}} \tilde{e}_{m} | = o_\epsilon((\log\frac{1}{\epsilon})^{-1/2})\,,\quad m=1, 2\,, \\[5pt] \sum_{k=2}^\infty\sum_{m=1}^{J(k)} |\int_{\partial B_{1/\epsilon}} \Psi_1 \cdot \partial_{\vec{n}} \tilde{e}_{m\,,k} | = o_\epsilon(1)\,, \end{array} \end{equation} where $J(k)$ is the mutliplicity of the eigenvalue $\lambda_k$ and $K> 0$ is a constant. In particular, suppose $H \equiv 0$. Then (\ref{1.14}) and (\ref{1.15}) imply that \begin{equation} \label{1.16} \dot{q}_{j \epsilon} = \frac{-2}{\log\frac{1}{\epsilon}} \nabla_x \tilde{H}_j(q_{j \epsilon}\,,t\,,\epsilon) + o_\epsilon(1)\,. \end{equation} Note that the equation (\ref{1.16}) is consistent with the governing equation in \cite{E} and \cite{LX1}. \section{Vortex dynamics in nonlinear Schr\"{o}dinger equations} \setcounter{equation}{0} The other application of Theorem~I and Proposition~I is for the dynamics of vortices in the nonlinear Schr\"{o}dinger equation. The system of nonlinear Schr\"{o}dinger equations is as follows. \begin{equation} \label{1.18} \begin{array}{c} -i u_t= \Delta u + \frac{1}{\epsilon^2} u (1- |u|^2) \quad\hbox{ for } x \in {\mathbb R}^2, t > 0 , \\ u|_{t=0}= u_0(x) \quad\hbox{ for } x \in {\mathbb R}^2\,. \end{array} \end{equation} The equation (\ref{1.18}) is a fundamental equation for understanding superfluids, see Ginzburg and Pitaevskii~\cite{GP}, Landau and Lifschitz~\cite{LL1}, Donnelly~\cite{D}, Frisch, Pomeau and Rica~\cite{FPR}, Josserand and Pomeau~\cite{JP}, and many others. For the vortex dynamics in a superfluid, we prove rigorously the asymptotic motion equation of a vortex from a solution of (\ref{1.18}) with some specific conditions. The method of our proof is more generalized than the formal asymptotic analysis in the dynamics of fluid dynamic vortices (cf.~\cite{N}). Fortunately, the asymptotic motion equation has the same leading order term (up to a time scaling) as Neu~\cite{N}'s result. In \cite{B}, superfluid ${}^4 He$ has a much larger Reynolds number than the Reynolds numbers of water and air. Moreover, the helium liquids have kinematic viscosities which are much smaller than those of water. It is well known that the high Reynolds number may cause the turbulent flow (cf. \cite{F}). However, up to now, it is often tacitly assumed that the laminar analysis of \cite{E} and \cite{N} may carry over to turbulent vortex cores, but no theoretical corroboration of that assumption had been available. In this paper, we provide a more generalized and rigorous argument to derive the vortex dynamics in a superfluid. In particular, the constraints imposed concern only global norms of the perturbations from the leading order steady state structure. 
Hence certain classes of highly unsteady fluctuations are allowed for the constraints. This is an important step to investigate the dynamics of fluid dynamic vortices with turbulent cores. Assume that $x=q_\epsilon(t)=(q_{\epsilon, 1}(t), q_{\epsilon, 2}(t))$ is the vortex trajectory and $q_\epsilon(0)=0$. In addition, we assume that in the vortex core $B_1(q_\epsilon(t))$, the classical solution $u(x, t)$ has only one vortex center at $q_\epsilon(t)$. Now we focus on the vortex core $B_1(q_\epsilon(t))$ and we consider the following equations \begin{equation} \label{1.19} \begin{array}{c} -i u_t= \Delta u + \frac{1}{\epsilon^2} u (1- |u|^2) \quad\hbox{ for } x \in B_1 (q_\epsilon(t)), t > 0 , \\ u|_{t=0}= u_0(x) \quad\hbox{ for } x \in B_1 (0), \end{array} \end{equation} Like (\ref{1.7}) and (\ref{1.9}), we take the same expansion form of the solution $u$ as follows. \begin{equation} \label{6.9} u(x,t,\epsilon) =\Psi(X, t, \epsilon) = \Psi_0(X) e^{i H} + \epsilon \Psi_1(X, t, \epsilon) e^{i H}\,, \end{equation} where $X=\frac{x-q_\epsilon(t)}{\epsilon}$, $\Psi_0(X) = f_0(s) e^{i \phi}$, $s = |X|$, $\phi = \arg(X)$, and $H$ satisfies (\ref{1.10}). We use Theorem~I and Proposition~I to derive the equation \begin{equation}\label{1.20} \begin{array}{lcl} \dot{q_\epsilon} &=& 2\nabla_x H(q_\epsilon\,,t\,,\epsilon) + o_\epsilon(1)\,,\\ &=& 2 J \nabla_x \tilde{H}(q_\epsilon\,,t\,,\epsilon) + o_\epsilon(1)\,, \end{array} \end{equation} provided that the same "small" perturbation conditions (\ref{3.1}), (\ref{3.2}) and (\ref{3.20}) hold. In \cite{S} p.~17, we learned that the equation (\ref{1.18}) is well posed. Then $\Psi_1$ is smooth in both space and time variables. Hence (\ref{3.1}), (\ref{3.2}) and (\ref{3.20}) can be fulfilled in a short time as $\Psi_1|_{t=0}$ satisfies Assumption~I in Section~3. By (\ref{6.9}) and (\ref{1.10}), (\ref{1.19}) becomes \begin{eqnarray} \lefteqn{ -i\dot{q}_\epsilon\cdot(\nabla_{X}\Psi_0+\epsilon\nabla_{X} \Psi_1) - \epsilon\Psi_0 H_t - \epsilon^2\Psi_1 H_t + i\epsilon^2 \Psi_{1, t} }\nonumber \\ &=& \epsilon\Psi_0 |\nabla_x H|^2 - 2i(\nabla_{X}\Psi_0 \cdot \nabla_x H) + \tilde{L_\epsilon} \Psi_1 + \hat{N}_\epsilon (\Psi_1) \label{4.1} \end{eqnarray} for $X \in B_{1/\epsilon} (0)$ and $t > 0$, where $\tilde{L}_\epsilon$ and $\hat{N}_\epsilon$ are defined in (\ref{6.1}). Making the inner product with (\ref{4.1}) and $\tilde{w}_j, j=1,2$ and using (\ref{3.8}), we have \begin{eqnarray} \lefteqn{ -\dot{q}_{{\epsilon, 1}} [\langle i\partial_{X_1}\Psi_0, \tilde{w}_j\rangle+\epsilon\langle i\partial_{X_1}\Psi_1\,,\tilde{w}_j\rangle] }\nonumber \\ \lefteqn{-\dot{q}_{{\epsilon, 2}} [\langle i\partial_{X_2}\Psi_0, \tilde{w}_j\rangle +\epsilon\langle i\partial_{X_2}\Psi_1\,,\tilde{w}_j\rangle ] }\label{4.2}\\ &=&\hat{\Gamma}_j + \epsilon^2 (\langle i\Psi_{1,t}\,,\tilde{w}_j\rangle + \langle \Psi_1 H_t\,,\tilde{w}_j\rangle) + \langle \hat{N}_\epsilon(\Psi_1)\,,\tilde{w}_j\rangle + o_\epsilon((\log\frac{1}{\epsilon})^{-1/2})\,, \nonumber \end{eqnarray} where \begin{equation}\label{4.3} \begin{array}{lll} \hat{\Gamma}_j&=&-2\langle i\partial_{X_1} \Psi_0 \partial_{x_1} H, \tilde{w}_j\rangle - 2\langle i\partial_{X_2} \Psi_0 \partial_{x_2} H\,,\tilde{w}_j\rangle \\ & &+ \epsilon\langle\Psi_0|\nabla_x H|^2,\tilde{w}_j\rangle + \epsilon\langle\Psi_0 H_t\,,\tilde{w}_j\rangle\,. \end{array} \end{equation} By (\ref{3.4}) and (\ref{3.9}), we have $\langle i\partial_{X_1}\Psi_0\,,\tilde{w}_1\rangle= \langle i\partial_{X_2}\Psi_0\,,\tilde{w}_2\rangle= 0$. 
Hence (\ref{4.2}) becomes \begin{equation}\label{4.4} \begin{array}{lcl} \epsilon\dot{q}_{{\epsilon, 1}} \hat{\alpha}_{12}+\dot{q}_{\epsilon, 2} (\hat{\alpha}_{11}+\epsilon\hat{\beta}_1) &=&-\gamma_1\,, \\ \dot{q}_{{\epsilon, 1}}(\hat{\alpha}_{21}+\epsilon\hat{\beta}_2) +\epsilon\dot{q}_{\epsilon, 2} \hat{\alpha}_{22} &=&-\gamma_2 \end{array} \end{equation} where $$ \begin{array}{llll} &\hat{\alpha}_{11}=\langle i\partial_{X_2}\Psi_0\,,\tilde{w}_1\rangle\,, &\hat{\alpha}_{21}=\langle i\partial_{X_1}\Psi_0\,,\tilde{w}_2\rangle\,, &\hat{\alpha}_{j2}=\langle i\partial_{X_j}\Psi_1\,,\tilde{w}_j\rangle\,, \\ &\hat{\beta}_1=\langle i\partial_{X_2}\Psi_1\,,\tilde{w}_1\rangle\,, &\hat{\beta}_2=\langle i\partial_{X_1}\Psi_1\,,\tilde{w}_2\rangle\,, &\eta_j=\langle\hat{N}_\epsilon(\Psi_1),\tilde{w}_j\rangle\,, \end{array} $$ and $$ \gamma_j=\hat{\Gamma}_j+\epsilon^2(\langle\Psi_1 H_t,\tilde{w}_j\rangle +\langle i\Psi_{1,t},\tilde{w}_j\rangle)+\eta_j+ o_\epsilon((\log\frac{1}{\epsilon})^{-1/2})\,. $$ By (\ref{1.10}), (\ref{3.1}) and (\ref{3.9}), we have \begin{equation}\label{4.5} \begin{array}{c} \hat{\alpha}_{11}= -\pi/\Gamma_{\epsilon 1}\,,\quad \hat{\alpha}_{21}= \pi/\Gamma_{\epsilon 2}\,, \\ \epsilon |\hat{\alpha}_{j 2}|\leq K \epsilon^{1-\beta}\,,\quad \epsilon |\hat{\beta}_{j}|\leq K \epsilon^{1-\beta}\,, \quad 0<\beta<1\,, \\ |\eta_j|= o_\epsilon((\log\frac{1}{\epsilon})^{-1/2})\,,\quad \gamma_j= \hat{\Gamma}_j + o_\epsilon((\log\frac{1}{\epsilon})^{-1/2})\,, \quad j=1, 2\,. \end{array} \end{equation} Furthermore, (\ref{4.4}) implies \begin{equation}\label{4.6} \dot{q}_{\epsilon, 1} (t)=-\frac{1}{\hat{\gamma}}[(\hat{\alpha}_{11}+ \epsilon\hat{\beta_1})\gamma_2 - \epsilon\hat{\alpha}_{22}\gamma_1]\,,\quad \dot{q}_{\epsilon, 2} (t)=-\frac{1}{\hat{\gamma}}[(\hat{\alpha}_{21}+ \epsilon\hat{\beta_2})\gamma_1 - \epsilon\hat{\alpha}_{12}\gamma_2]\,, \end{equation} where $\hat{\gamma} = (\hat{\alpha}_{11}+\epsilon\hat{\beta_1})(\hat{\alpha}_{21}+\epsilon\hat{\beta_2}) - \epsilon^2\hat{\alpha}_{12}\hat{\alpha}_{22}$. Moreover, by (\ref{1.10}), (\ref{3.9}) and the mean value theorem for harmonic functions, we have \begin{equation}\label{4.7} \epsilon \langle \Psi_0 H_t\,,\tilde{w}_j\rangle = \frac{\pi \epsilon^2}{\Gamma_{\epsilon j}} (\int_0^{1/\epsilon} s^2 f_0 f_0'\,ds) \partial_{x_j} H_t(q_\epsilon\,,t\,,\epsilon)\,. \end{equation} From (\ref{1.3}) and (\ref{1.4}), we obtain \begin{equation}\label{4.8} \int_0^{1/\epsilon} s^2 f_0 f_0'\,ds = \log\frac{1}{\epsilon} + O(1)\,. \end{equation} Hence by (\ref{4.3}), (\ref{4.7}), (\ref{4.8}) and (\ref{3.13}), we obtain \begin{equation}\label{4.9} \begin{array}{lcl} \hat{\Gamma}_1&=& \frac{\pi}{\Gamma_{\epsilon 1}}[2\partial_{x_2}H(q_\epsilon\,,t\,,\epsilon)+\epsilon^2(\log\frac{1}{\epsilon}+O(1))\partial_{x_1}H_t(q_\epsilon\,,t\,,\epsilon)] \\ && + O(\epsilon(\log\frac{1}{\epsilon})^{-1/2})\,, \\ \hat{\Gamma}_2&=& \frac{\pi}{\Gamma_{\epsilon 2}}[-2\partial_{x_1}H(q_\epsilon\,,t\,,\epsilon)+\epsilon^2(\log\frac{1}{\epsilon}+O(1))\partial_{x_2}H_t(q_\epsilon\,,t\,,\epsilon)] \\ && + O(\epsilon(\log\frac{1}{\epsilon})^{-1/2})\,. \end{array} \end{equation} Thus by (\ref{1.10}), (\ref{4.5}), (\ref{4.6}) and (\ref{4.9}), we complete the proof of (\ref{1.20}).
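To indicate how these estimates combine, we note (as a heuristic leading-order computation; the rigorous bookkeeping of the error terms uses (\ref{4.5}) and (\ref{4.9})) that, modulo the $O(\epsilon^{1-\beta})$ corrections in (\ref{4.5}), $$ \hat{\gamma}\approx \hat{\alpha}_{11}\hat{\alpha}_{21} =-\frac{\pi^2}{\Gamma_{\epsilon 1}\Gamma_{\epsilon 2}}\,,\qquad \dot{q}_{\epsilon, 1}\approx -\frac{\gamma_2}{\hat{\alpha}_{21}}\,,\qquad \dot{q}_{\epsilon, 2}\approx -\frac{\gamma_1}{\hat{\alpha}_{11}}\,. $$ Since $\gamma_j\approx\hat{\Gamma}_j$ by (\ref{4.5}), inserting (\ref{4.9}) and discarding the $\epsilon^2(\log\frac{1}{\epsilon}+O(1))$-terms gives $$ \dot{q}_{\epsilon, 1}\approx -\frac{\Gamma_{\epsilon 2}}{\pi}\,\hat{\Gamma}_2 \approx 2\partial_{x_1}H(q_\epsilon\,,t\,,\epsilon)\,,\qquad \dot{q}_{\epsilon, 2}\approx \frac{\Gamma_{\epsilon 1}}{\pi}\,\hat{\Gamma}_1 \approx 2\partial_{x_2}H(q_\epsilon\,,t\,,\epsilon)\,, $$ which is the first line of (\ref{1.20}).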
For the motion of $d$-vortices, we restrict (\ref{1.18}) to the vortex cores $B_1(q_{j \epsilon})$ and we consider \begin{equation} \label{1.21} \begin{array}{c} -i u_t= \Delta u + \frac{1}{\epsilon^2} u (1- |u|^2) \quad\hbox{for } x \in B_1 (q_{j \epsilon}(t)),\; t > 0\,, \\ u|_{t=0}= u_{j 0}(x) \quad\hbox{for } x \in B_1 (q_{j \epsilon}(0))\,, \end{array} \end{equation} where $x=q_{j \epsilon}(t)$ is the $j$-th vortex trajectory, $q_{j \epsilon}$ is smooth in $t$, $|q_{j \epsilon} - q_{k \epsilon}| > 2$ for $j \neq k$, and $B_1(q_{j \epsilon}(t))$ is the $j$-th vortex core which moves along with the $j$-th vortex trajectory $x=q_{j \epsilon}(t)$, $j=1,\dots,d$. By the same argument as for (\ref{1.20}) on each vortex core, we obtain the equations for the $q_{j \epsilon}$ as follows. \begin{equation}\label{1.22} \dot{q}_{j \epsilon} = 2 J \nabla_x \tilde{H}_j(q_{j \epsilon}\,,t\,,\epsilon) + o_\epsilon(1)\,,\quad j= 1, \dots, d\,, \end{equation} provided that the same \lq\lq small" perturbation conditions (\ref{3.15}), (\ref{3.16}) and (\ref{3.17}) hold. Note that both (\ref{1.20}) and (\ref{1.22}) are consistent with the governing equations in \cite{N} and \cite{LX}. \section*{Appendix: Proof of Lemma~I} \setcounter{equation}{0} It is easy to check that $a_2(s)=\frac{f_0}{s}+f_0'$, $b_2(s)=f_0'-\frac{f_0}{s}$ is a solution of (\ref{2.16}). To obtain $(a_3\,,b_3)$, we let $$ a(s)=e^{-\sqrt{2}s}s^{-1/2} \eta_1(s)\,,\quad b(s)=e^{-\sqrt{2}s}s^{-1/2} \eta_2(s) $$ be the solution of (\ref{2.16}). Then $\eta_1$ and $\eta_2$ satisfy \begin{equation} \label{2.17} \begin{array}{c} \eta_1''-2\sqrt{2}\eta_1' +(3+\frac{1}{4 s^2}-2f_0^2)\eta_1-f_0^2\eta_2 =0\,, \\[5pt] \eta_2''-2\sqrt{2}\eta_2' +(3-\frac{15}{4 s^2}-2f_0^2)\eta_2-f_0^2\eta_1 =0\,. \end{array} \end{equation} Let $u = \eta_1+\eta_2$ and $v = \eta_1-\eta_2$. Then by (\ref{2.10}) and (\ref{2.17}), we have \begin{equation} \label{2.18} \begin{array}{c} u''-2\sqrt{2}u'+\frac{2}{s^2}v+\beta_1(s)u=0\,, \\[5pt] v''-2\sqrt{2}v'+2v+\frac{2}{s^2}u+\beta_2(s)v=0\,, \end{array} \end{equation} where $$ \begin{array}{lcl} \beta_1(s)&=&3-\frac{7}{4 s^2}-3f_0^2 =\frac{5}{4 s^2}+\frac{6}{s^4}+\dots\,,\\[5pt] \beta_2(s)&=&1-\frac{7}{4 s^2}-f_0^2 =-\frac{3}{4 s^2}+\frac{2}{s^4}+\dots\quad\hbox{ as } s\to +\infty\,. \end{array} $$ Now we transform (\ref{2.18}) into the following integral equations $$ \begin{array}{lcl} u(s)&=& 1+\int_s^\infty \beta_1(\zeta)K(s,\zeta) u(\zeta) + \frac{2}{\zeta^2} K(s,\zeta) v(\zeta)\,d\zeta\,,\\[5pt] v(s)&=& \int_s^\infty \beta_2(\zeta)\tilde{K}(s,\zeta) v(\zeta)+\frac{2}{\zeta^2}\tilde{K} (s,\zeta) u(\zeta)\,d\zeta\,, \end{array} $$ where $$ K(s,\zeta)=\frac{1-e^{-2\sqrt{2}(\zeta-s)}}{-2\sqrt{2}}\,,\quad \tilde{K}(s,\zeta)=(s-\zeta)e^{-\sqrt{2}(\zeta-s)}\,. $$ By the iteration method (cf.~\cite{J}, pp.~199--209), we set $$ \begin{array}{c} u_0(s)\equiv 1\,,\quad v_0(s)\equiv 0\,,\\[5pt] u_{m+1}(s)= 1+\int_s^\infty \beta_1(\zeta)K(s\,,\zeta)u_m(\zeta) + \frac{2}{\zeta^2} K(s\,,\zeta)v_m(\zeta)\,d\zeta \,,\\[5pt] v_{m+1}(s)= \int_s^\infty \beta_2(\zeta)\tilde{K}(s\,,\zeta)v_m(\zeta) + \frac{2}{\zeta^2} \tilde{K}(s\,,\zeta)u_m(\zeta)\,d\zeta \,, \end{array} $$ for $m= 0, 1, 2, \dots$. Note that $$ \begin{array}{c} \int_s^\infty K(s\,,\zeta) \zeta^{-m}\,d\zeta = O(s^{1-m})\,,\quad \hbox{ for } m\geq 2 \,, \\[5pt] \int_s^\infty \tilde{K}(s\,,\zeta) \zeta^{-m}\,d\zeta = O(s^{-m})\,,\quad \hbox{ for } m\geq 2 \,. \end{array} $$ Then it is easy to deduce that $u_m, v_m$ converge to $u, v$ respectively as $m\to\infty$.
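For completeness, we indicate how the two kernel estimates above may be checked (an elementary verification added for the reader's convenience). Since $0\leq\frac{1-e^{-2\sqrt{2}(\zeta-s)}}{2\sqrt{2}}\leq\frac{1}{2\sqrt{2}}$ for $\zeta\geq s$, we have $$ \Big|\int_s^\infty K(s\,,\zeta)\,\zeta^{-m}\,d\zeta\Big| \leq \frac{1}{2\sqrt{2}}\int_s^\infty \zeta^{-m}\,d\zeta = \frac{s^{1-m}}{2\sqrt{2}\,(m-1)} = O(s^{1-m})\,, $$ while the substitution $\tau=\zeta-s$ gives $$ \Big|\int_s^\infty \tilde{K}(s\,,\zeta)\,\zeta^{-m}\,d\zeta\Big| = \int_0^\infty \tau e^{-\sqrt{2}\tau}(s+\tau)^{-m}\,d\tau \leq s^{-m}\int_0^\infty \tau e^{-\sqrt{2}\tau}\,d\tau = \frac{s^{-m}}{2} = O(s^{-m})\,, $$ for $m\geq 2$.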
Moreover, $(u, v)$ is a solution of (\ref{2.18}), and it satisfies \begin{eqnarray*} u(s)&=& 1-\frac{5}{8\sqrt{2}s}+\frac{\tilde{\alpha}_{32}}{s^2}+ \dots\,,\\ v(s)&=& -\frac{1}{s^2}+\frac{\tilde{\beta}_{33}}{s^3} +\dots \quad\hbox{ as } s\to +\infty\,, \end{eqnarray*} where the $\tilde{\alpha}_{j k}, \tilde{\beta}_{j k}$ are constants. Thus \begin{eqnarray*} \eta_1&=&\frac{1}{2}-\frac{5}{16\sqrt{2}s}+\frac{\alpha_{32}}{s^2}+\dots\,, \\ \eta_2&=&\frac{1}{2}-\frac{5}{16\sqrt{2}s}+\frac{\beta_{32}}{s^2}+\dots \quad\hbox{ as } s\to +\infty\,, \end{eqnarray*} where the $\alpha_{j k}, \beta_{j k}$ are constants. Therefore, we obtain the solution $(a_3, b_3)$. To obtain $(a_4, b_4)$, we set $$ s\eta_3= a(s)+b(s)\,,\quad s\eta_4= a(s)-b(s)\,, $$ where $(a, b)$ is the solution of (\ref{2.16}). Then $(\eta_3, \eta_4)$ satisfies $$ \begin{array}{c} \eta_3''-2\eta_3+\frac{3}{s}\eta_3'+\frac{2}{s^2}\eta_4+\beta_3(s)\eta_3 = 0 \,,\\ \eta_4''+\frac{3}{s}\eta_4'+\frac{2}{s^2}\eta_3+\beta_4(s)\eta_4 = 0 \,, \end{array} $$ where $$ \begin{array}{c} \beta_3(s)= 3-3f_0^2-\frac{1}{s^2}= \frac{2}{s^2}+\frac{6}{s^4}+\dots\,,\\ \beta_4(s)= 1-f_0^2-\frac{1}{s^2} = \frac{2}{s^4}+\dots\quad\hbox{as } s\to +\infty\,. \end{array} $$ This is equivalent to the following integral equations $$ \begin{array}{lcl} \eta_3(s)&=& -3\int_s^\infty (\frac{1}{t}\tilde{K}_t(s,t)-\frac{1}{t^2} \tilde{K}(s,t))\eta_3(t)\,dt \\ &&+ \int_s^\infty [\frac{2}{t^2}\eta_4(t)+\beta_3(t)\eta_3(t)] \tilde{K}(s,t)\,dt \,, \\[5pt] \eta_4(s)&=& 1+\frac{1}{2}\int_s^\infty (t^{-2}-s^{-2}) t^2 [ \beta_4(t)\eta_4(t) + \frac{2}{t^2} \eta_3(t)]\,dt\,, \end{array} $$ where $$ \tilde{K}(s,t)= (s-t)e^{-\sqrt{2}(t-s)}\,, \quad \tilde{K}_t(s,t)= -[\sqrt{2}(s-t)+1]e^{-\sqrt{2}(t-s)}\,. $$ By the iteration method, we set $$ \begin{array}{lcl} \eta_{3\,,0}(s)&\equiv& 0\,,\quad \eta_{4\,,0}(s)\equiv 1\,, \\[5pt] \eta_{3\,,m+1}(s)&=& -3\int_s^\infty (\frac{1}{t}\tilde{K}_t(s,t)-\frac{1}{t^2}\tilde{K}(s,t))\eta_{3\,,m}(t)\,dt \\ &&+ \int_s^\infty [\frac{2}{t^2}\eta_{4\,,m}(t)+\beta_3(t) \eta_{3\,,m}(t)]\tilde{K}(s,t)\,dt \,, \\[5pt] \eta_{4\,,m+1}(s)&=& 1+\frac{1}{2}\int_s^\infty (t^{-2}-s^{-2}) t^2 [\beta_4(t)\eta_{4\,,m}(t) + \frac{2}{t^2} \eta_{3\,,m+1}(t)]\,dt \,, \end{array} $$ for $m = 0, 1, 2, \dots$. Now we claim that $(\eta_{3\,,m}\,,\eta_{4\,,m})$ converges to the solution $(\eta_3\,,\eta_4)$. For $(u,v)= (u(s),v(s))$, let $T(u,v)(s)= \Big(\begin{array}{c}Tu(s) \\Tv(s)\end{array}\Big)$, where $$\begin{array}{lcl} Tu(s)&=&-3\int_s^\infty (\frac{1}{t}\tilde{K}_t(s,t) -\frac{1}{t^2}\tilde{K}(s,t))u(t)\,dt\\ &&+ \int_s^\infty [\frac{2}{t^2}v(t)+\beta_3(t)u(t)]\tilde{K}(s,t)\,dt \,, \\[5pt] Tv(s)&=&1+\frac{1}{2}\int_s^\infty (t^{-2}-s^{-2}) t^2 [ \beta_4(t)v(t) + \frac{2}{t^2} Tu(t)]\,dt\,. \end{array} $$ Then we have $\Big(\begin{array}{c} \eta_{3\,,m+1} \\ \eta_{4\,,m+1} \end{array}\Big)= \Big(\begin{array}{c} 0 \\ 1\end{array}\Big) + T(\eta_{3\,,m}\,,\eta_{4\,,m})$; \\ i.e., $\Big(\begin{array}{c} \eta_{3\,,m} \\ \eta_{4\,,m}\end{array}\Big) = (I+T+T^2+\dots+T^m)(0,1)$. Furthermore, it is easy to check that $$ \begin{array}{c} \int_s^\infty (s^{-2}-t^{-2})t^{2-m}\,dt = \frac{2}{(m-2)^2-1} s^{1-m}\,,\quad \hbox{ for } m\geq 4 \,,\\[5pt] \int_s^\infty \tilde{K}_t(s\,,t) t^{-m}\,dt = O(s^{-(m+1)})\,,\quad \hbox{ for } m\geq 2\,,\\[5pt] \int_s^\infty \tilde{K}(s\,,t) t^{-m}\,dt = O(s^{-m})\,,\quad \hbox{ for } m\geq 2 \,.
\end{array} $$ Thus we obtain that $$ \begin{array}{c} T(0,1)= \Big(\begin{array}{c} O(s^{-2}) \\ O(s^{-3})\end{array} \Big)\,, \quad T^2(0,1)= \Big(\begin{array}{c} O(s^{-4}) \\ O(s^{-5})\end{array} \Big)\,, \\ T^3(0,1)= \Big(\begin{array}{c} O(s^{-6}) \\ O(s^{-7})\end{array} \Big)\,, \quad T^4(0,1)= \Big(\begin{array}{c} O(s^{-8}) \\ O(s^{-9})\end{array} \Big)\,,\dots\,. \end{array} $$ Thus $$ |T^m(0,1)| \leq C_2 S_0^{-m}\quad\hbox{ for } m\geq 1\,,\; s\geq S_0\,, $$ where $C_2 > 0$ is a constant and $S_0 > 0$ is a large constant. Therefore $(\eta_{3\,,m},\eta_{4\,,m})$ converges to the solution $(\eta_3,\eta_4)$ as $m\to\infty$. Moreover $$ \eta_3= O(\frac{1}{s^2})\,,\quad\eta_4= 1+O(\frac{1}{s^2})\quad\hbox{ as } s\to\infty\,, $$ and we obtain the solution $(a_4, b_4)= (\frac{1}{2} s(\eta_3+\eta_4)\,,\frac{1}{2} s(\eta_3-\eta_4))$. To obtain $(a_5, b_5)$, we use the change of variable $s=e^t$ and let $\hat{a}(t) = a(e^t), \hat{b}(t) = b(e^t)$. Then (\ref{2.16}) becomes \begin{equation}\label{2.19} \begin{array}{rcl} \hat{a}'' &=& e^{2t}(2f_0^2(e^t)-1)\hat{a} + e^{2t}f_0^2(e^t)\hat{b}\,, \\[5pt] \hat{b}'' &=&e^{2t}f_0^2(e^t)\hat{a} + [e^{2t}(2f_0^2(e^t)-1)+4]\hat{b}\,. \end{array} \end{equation} By (\ref{2.10}), there exists a large constant $T_0 > 0$ such that $$ 2f_0^2(e^t)-1 > 0\quad\hbox{ for } t\geq T_0\,. $$ Let $(\hat{a}_5, \hat{b}_5)$ be the solution of (\ref{2.19}) with initial data $$ ( \hat{a}_5, \hat{a}_5', \hat{b}_5, \hat{b}_5' )(T_0) = (1,1,1,1)\,. $$ Then (\ref{2.19}) implies that $\hat{a}_5, \hat{b}_5$ are positive and increasing for $t\geq T_0$. Moreover, by (\ref{2.10}) and (\ref{2.19}), we have $$ \hat{a}_5''(t)\geq e^{2t} \quad\hbox{ for } t\geq T_1\,, $$ where $T_1\geq T_0$ is a large constant. Similarly $\hat{b}_5''(t)\geq e^{2t}$ for $t\geq T_1$. Hence $$ \hat{a}_5(t)\,,\;\hat{b}_5(t)\geq \frac{1}{8} e^{2t}\quad\hbox{ as } t\to +\infty\,. $$ Thus $$ a_5(s)=\hat{a}_5(\ln s)\,,\; b_5(s)=\hat{b}_5(\ln s)\geq \frac{1}{8} s^2\quad\hbox{ as } s\to +\infty\,. $$ Therefore we complete the proof of Lemma~I. \paragraph{Acknowledgement} The author wants to express his gratitude to Professors F. H. Lin, L. C. Evans, and J. C. Neu for their helpful discussions. \begin{thebibliography}{[C]} \bibitem{BBH} F.\ Bethuel, H.\ Brezis, F.\ Helein, Ginzburg-Landau Vortices, \textit{Birkh\"auser}, 1994. \bibitem{B} T.\ F.\ Buttke, Turbulence and vortices in superfluid Helium, in Vortex Methods and Vortex Motion, K.\ E.\ Gustafson, J.\ A.\ Sethian, eds., \textit{SIAM}, 1991, pp.~171--193. \bibitem{CET} X.\ Chen, C.\ M.\ Elliott, T.\ Qi, Shooting method for vortex solutions of a complex-valued Ginzburg-Landau equation, \textit{Proc.\ Roy.\ Soc.\ Edinburgh}, \textbf{124A} (1994), pp.~1075--1088. \bibitem{D} R.\ J.\ Donnelly, Quantized Vortices in Helium II, \textit{Cambridge Univ.\ Press}, Cambridge, 1991. \bibitem{E} W.\ E, Dynamics of vortices in Ginzburg-Landau theories with applications to superconductivity, \textit{Physica D} \textbf{77} (1994), pp.~383--404. \bibitem{F} U.\ Frisch, Turbulence: the Legacy of A.\ N.\ Kolmogorov, \textit{Cambridge Univ.\ Press}, Cambridge, 1995. \bibitem{FPR} T.\ Frisch, Y.\ Pomeau, S.\ Rica, Transition to dissipation in a model of superflow, \textit{Phys.\ Rev.\ Lett.} \textbf{69} (1992), pp.~1644--1647. \bibitem{GP} V.\ L.\ Ginzburg, L.\ P.\ Pitaevskii, On the theory of superfluidity, \textit{Sov.\ Phys.\ JETP} \textbf{7} (1958), p.~585.
\bibitem{H1} D.\ Henry, Geometric Theory of Semilinear Parabolic Equations, \textit{Lecture Notes in Math.}, \textbf{840}, Springer-Verlag, 1981. \bibitem{H} P.~Hagan, Spiral waves in reaction-diffusion equations, \textit{SIAM J.\ Appl.\ Math.} \textbf{42} (1982), no.~4, pp.~762--786. \bibitem{HH} R.~M.~Herv\'{e}, M.~Herv\'{e}, \'{E}tude qualitative des solutions r\'{e}elles d'une \'{e}quation diff\'{e}rentielle li\'{e}e \`{a} l'\'{e}quation de Ginzburg-Landau, \textit{Ann.\ Inst.\ Henri Poincar\'{e}}, Vol.~11, no.~4, 1994, pp.~427--440. \bibitem{J} F.\ John, Ordinary Differential Equations, \textit{Lecture Notes of Courant Institute}, 1965. \bibitem{JP} C.\ Josserand, Y.\ Pomeau, Generation of vortices in a model superfluid ${}^4$He by the KP instability, \textit{Europhysics Letters} \textbf{30} (1995), pp.~43--48. \bibitem{LL1} L.\ D.\ Landau, E.\ M.\ Lifschitz, Fluid Mechanics, Course of Theoretical Physics, \textbf{6}, 2nd edition, \textit{Pergamon Press}, 1989. \bibitem{LL} E.\ H.\ Lieb, M.\ Loss, Symmetry of the Ginzburg-Landau minimizer in a disc, \textit{Math.\ Res.\ Lett.} \textbf{1} (1994), no.~6, pp.~701--715. \bibitem{L1} F.\ H.\ Lin, Some dynamical properties of Ginzburg-Landau vortices, \textit{Comm.\ Pure\ Appl.\ Math.}, Vol.~XLIX (1996), pp.~323--359. \bibitem{LX} F.\ H.\ Lin, J.\ X.\ Xin, On the incompressible fluid limit and the vortex motion law of the nonlinear Schr\"{o}dinger equation, preprint. \bibitem{LX1} F.\ H.\ Lin, J.\ X.\ Xin, On the dynamical law of the Ginzburg-Landau vortices on the plane, preprint. \bibitem{L} T.\ C.\ Lin, The stability of the radial solution to the Ginzburg-Landau equation, \textit{Comm.\ in PDE}, Vol.~22, no.~3--4, 1997, pp.~619--632. \bibitem{M1} C.\ B.\ Morrey, Multiple Integrals in the Calculus of Variations, \textit{Springer-Verlag}, 1966. \bibitem{M} P.\ Mironescu, On the stability of radial solutions of the Ginzburg-Landau equation, \textit{J.\ Funct.\ Anal.} \textbf{130} (1995), no.~2, pp.~334--344. \bibitem{N} J.\ C.\ Neu, Vortices in complex scalar fields, \textit{Physica D} \textbf{43} (1990), pp.~385--406. \bibitem{S} W.\ A.\ Strauss, Nonlinear Wave Equations, \textit{Conference Board of the Math.\ Sci.}, \textbf{73}, AMS, 1989. \bibitem{WX} M.\ I.\ Weinstein, J.\ Xin, Dynamic stability of vortex solutions of Ginzburg-Landau and nonlinear Schr\"odinger equations, \textit{Comm.\ Math.\ Phys.} \textbf{180} (1996), no.~2, pp.~389--428. \end{thebibliography} \medskip \noindent{\sc Tai-Chia Lin} \\ Department of Mathematics \\ National Chung-Cheng University \\ Chia-Yi, Taiwan, ROC \\ e-mail: tclin@math.ccu.edu.tw \end{document}