\documentclass[11pt,twoside]{article}
\usepackage{amsmath, amsthm, amscd, amsfonts, amssymb, graphicx, color}
\usepackage[bookmarksnumbered, colorlinks]{hyperref} \usepackage{float}
\usepackage{lipsum}
\usepackage{afterpage}
\usepackage[labelfont=bf]{caption}
\usepackage[nottoc,notlof,notlot]{tocbibind} 
%\renewcommand\bibname{References}
\def\bibname{\Large \bf  References}
\usepackage{lipsum}
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\renewcommand{\headrulewidth}{0pt}
\fancyhead[LE,RO]{\thepage}
\thispagestyle{empty}
%\afterpage{\lhead{new value}}

\fancyhead[CE]{E. Aryani, A. Babaei and A. Valinejad} 
\fancyhead[CO]{An approach for solving a system of FSIDEs}



%\topmargin=-1.6cm
\textheight 17.5cm%
\textwidth  12cm %
\topmargin   8mm  %
\oddsidemargin   20mm   %
\evensidemargin   20mm   %
\footskip=24pt     %

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{xca}[theorem]{Exercise}
%\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\renewenvironment{proof}{{\bfseries \noindent Proof.}}{~~~~$\square$}
\makeatletter
\def\th@newremark{\th@remark\thm@headfont{\bfseries}}
\makeatother



  
  
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% If you want to insert other packages. Insert them here
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\long\def\symbolfootnote[#1]#2{\begingroup%
%\def\thefootnote{\fnsymbol{footnote}}\footnote[#1]{#2}\endgroup}



 \def \thesection{\arabic{section}}
 

\begin{document}
%\baselineskip 9mm
%\setcounter{page}{}
\thispagestyle{plain}
{\noindent Journal of Mathematical Extension \\
Vol. XX, No. XX, (2014), pp-pp (Will be inserted by layout editor)}\\
ISSN: 1735-8299\\
URL: http://www.ijmex.com\\
\vspace*{9mm}

\begin{center}

{\Large \bf 
An accurate approach based on modified hat functions for solving a system of fractional stochastic integro-differential equations}
%{\bf } 


\let\thefootnote\relax\footnote{\scriptsize Received: XXXX; Accepted: XXXX (Will be inserted by editor)}

{\bf E. Aryani,  A. Babaei$^*$\let\thefootnote\relax\footnote{$^*$Corresponding Author}}\vspace*{-2mm}


\vspace{2mm} {\small  Department of Applied Mathematics, University of Mazandaran, Babolsar, Iran} \vspace{2mm}

{\bf A. Valinejad}\vspace*{-2mm}\\
\vspace{2mm} {\small  Department of Computer Science, University of Mazandaran, Babolsar, Iran} \vspace{2mm}

\end{center}

\vspace{4mm}


{\footnotesize
\begin{quotation}
{\noindent \bf Abstract.} In this paper, a numerical method based on modified hat functions (MHFs) is investigated to find an approximate solution for a fractional-order system of stochastic integro-differential equations. The fractional and stochastic operational matrices of integration of these functions are employed to present the numerical approach. By using these operational matrices and properties of MHFs, the considered problem is transformed into a system of algebraic equations which can be easily solved by an iterative method. Also, error analysis of the proposed method is discussed. At the end, the accuracy and effectiveness of this approach are studied by some  numerical examples.
\end{quotation}
\begin{quotation}
\noindent{\bf AMS Subject Classification:} 60H20; 45J05; 26A33; 65C30.

\noindent{\bf Keywords and Phrases:} Modified hat functions, Fractional stochastic integro-differential equations, Brownian motion, Caputo derivative, Operational matrix.
\end{quotation}}


\section{Introduction}

In recent years, fractional integral equations and fractional differential equations have gained popularity for modelling widespread fields of science such as control theory of dynamical systems, electrical networks, optics, signal processing, biology and so on [1--9].
%\cite{Podlubny,Kilbas,diethlm,bal11,khalil,rezapour1,tuan,rezapour2,jafari5}. 
Thus, numerous numerical methods have been proposed for solving these classes of equations, for example hybrid collocation method \cite{Ma}, least squares method \cite{Jabari}, finite volume element method \cite{jafari1} and operational matrix of Genocchi polynomials \cite{jafari2} and  Bernstein polynomials \cite{jafari3}.

Deterministic differential equations have an important role in the application of mathematics to many branches of science. However, more realistic formulations of deterministic equations, which are widely applicable in the sciences, involve random noise or uncertainties. This phenomenon gives rise to stochastic differential equations \cite{Kloden,Oksendal}. In other words, stochastic differential equations arise when a random noise is introduced into deterministic differential equations.

To describe these events accurately with mathematical models, various kinds of stochastic differential equations or stochastic integro-differential equations have been utilized. A well-known example of a stochastic process is Brownian motion, which describes the random motion of a particle as it interacts with fluid molecules \cite{Oksendal}. A wide range of problems in sciences such as economics, medicine and physics have been simulated as fractional stochastic integral equations \cite{bia,fallah,meer}. In many cases, the analytical solutions of fractional stochastic integro-differential and integral equations are not known. Thus, several researchers have been motivated to develop numerical approaches for these equations, such as 
Monte-Carlo Galerkin Approximation \cite{badr}, Galerkin method based on orthogonal polynomials  \cite{Kamrani}, expansion method  \cite{ Khodabin}, Block pulse approximation \cite{Asghari}, operational matrix of the Chebyshev wavelets  \cite{Mohammadi}, spectral collocation method \cite{Taheri},   operational matrices of hybrid of block-pulse and parabolic functions method  \cite{Mirzaee1}, computational scheme based on B-spline interpolation method \cite{Mogh} and so on.

In this work, we consider a system of fractional stochastic integro-differential equations (FSIDEs) as
\begin{eqnarray}\label{eq1}
\mathbf{D}_{0,t}^{\mathbf{\alpha}}\mathbf{Y}(t)=\mathbf{F}(t)+\mu \mathbf{Y}(t)&+&\int_{0}^{t}\mathbf{k}(t,s)\mathbf{Y}(s) \mathrm{d}{s} \nonumber \\
&+&\sigma \int_{0}^{t}\mathbf{\hat {k}}(t,s)\mathbf{Y}(s)\mathrm{d}{\mathcal{B}(s)} ,\quad t\in \Omega,
\end{eqnarray}
with the initial condition
\begin{equation}\label{eq2}
\mathbf{Y}(0)=\Lambda_{\text{int}},
\end{equation}
where  $ \sigma $ is a positive real constant, $ \Omega := [0,T] $ and  the vectors in the system (\ref{eq1}) are defined  as follows:
\begin{eqnarray*}
\mathbf{Y}(t)=\left[y_{1}(t),\dots , y_{i}(t), \cdots ,y_{m}(t) \right]^{T}, \\
\mathbf{F}(t)=\left[f_{1}(t),\dots ,f_{i}(t), \dots ,f_{m}(t) \right]^{T},
\end{eqnarray*}
and  the matrices of the system (\ref{eq1}) are defined  by
\begin{eqnarray*} \label{eq300}
\mathbf{\mu}=\text{diag} \left [\mu_{1},\cdots , \mu_{i},\dots ,\mu_{m}\right ],\qquad\qquad\quad\\
\qquad\mathbf{k}(t,s)=\Big [ k_{ij}(t,s)\Big ]_{m\times m},\qquad i,j=1,2,\dots ,m,  \\
\mathbf{\hat{k}}(t,s)=\left[\hat{ k}_{ij}(t,s)\right]_{m\times m},\qquad i,j=1,2,\dots ,m.
\end{eqnarray*}
The vector of  initial conditions $ \Lambda_{\text{int}} $ in (\ref{eq2}) is defined  as:
\begin{eqnarray*}\label{ew1}
\Lambda_{\text{int}}:=\left[d_{1},\dots ,d_{i}, \cdots ,d_{m} \right]^{T},
\end{eqnarray*}
where $ d_{i},~i=1,...,m, $ are  real constants. Furthermore, the operator $ \mathbf{D}_{0,t}^{\mathbf{\alpha}}[\cdot] $ denotes  the vector of  Caputo fractional derivative defined as  \cite{Podlubny}:
\[\mathbf{D}_{0,t}^{\mathbf{\alpha}}[\cdot]:=\Big [{D}_{0,t}^{{\alpha_{1}}}[\cdot], \cdots , {D}_{0,t}^{{\alpha_{i}}}[\cdot], \cdots , {D}_{0,t}^{{\alpha_{m}}}[\cdot]\Big ]^{T},\]
where
\begin{equation}\nonumber
D_{0,t}^{\alpha_{i}}g(t)=\frac{1}{\Gamma(1-\alpha_{i})}\int _{0}^{t}\frac{g'(s)}{(t-s)^{\alpha_{i}}}\mathrm{d}{s}, \qquad \alpha_{i}\in (0,1),
\end{equation}
and $ \Gamma (\cdot) $ represents the Gamma function.
Let $(\Omega ,\mathcal{F},p)$ be a probability space with a normal filtration $(\mathcal{F}_{t})_{t\geq 0}$, and let $\mathcal{B}=\lbrace\mathcal {B}(t),~ t\geq 0\rbrace$ be a Brownian motion. Moreover, the functions $f_{i}(t),~i=1,...,m$,  $k_{ij}(t,s)$ and $\hat{k}_{ij}(t,s),~i,j=1,...,m,$ for  $ t,s \in\Omega $, are  known stochastic processes defined on the same probability space $(\Omega ,\mathcal{F},p)$
and $y_{i}(t)$ are the unknown functions to be found. To solve the problem (\ref{eq1})-(\ref{eq2}), we apply the operational matrices of modified hat functions (MHFs). By using these operational matrices, this problem will be reduced into a system of algebraic equations.


The outline of the paper is as follows: In Section 2, some basic definitions of fractional calculus, the definition of modified hat functions, their properties and operational matrices of integration based on MHFs are introduced. The numerical method is described in Section 3. Error estimate of the proposed method is investigated in Section 4. Some numerical implementations are presented in Section 5 to illustrate the accuracy of our method. Finally, conclusion of this work is included in Section 6.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Preliminary concepts and tools}
In this section, some useful concepts and tools that are used during this paper will be introduced.
\subsection{Fractional calculus}
First, we consider some definitions and properties of the Caputo derivative and the Riemann-Liouville integral.
%that are used in this paper are introduced.
\begin{definition}
\cite{Podlubny} %The left-sided fractional derivative of order $\alpha$ in the sense of the Caputo definition with the lower terminal fixed at zero is defined by
The Riemann-Liouville fractional integral operator of order $\alpha \geq 0$, is defined as
\begin{equation*}
\mathcal{I}^{\alpha}g(t)=\frac{1}{\Gamma (\alpha)}\int _{0}^{t}(t-s)^{\alpha -1} g(s) \mathrm{d}{s},~~ t>0.
\end{equation*}
\end{definition}
%\begin{definition}
%\cite{Podlubny} The Caputo's fractional derivative of order $ \alpha $, is defined as
%\begin{equation}
%D^{\alpha}g(t)=\frac{1}{\Gamma(r-\alpha)}\int _{0}^{t}\frac{g^{(r)}(s)}{(t-s)^{\alpha +1 -r}}ds,~~r-1<\alpha \leq r, r\in \Bbb{N}.
%\end{equation}
%\end{definition}
\begin{lemma}  \cite{Podlubny} Assume that $r \in \Bbb{N}$, $r-1<\alpha\leq r$ and $g \in C^{r-1}[0,b]$, where b is a positive real constant. Then
\begin{equation*}
\begin{split}
\mathcal{I}^{\alpha}(\mathcal{I}^{\beta}g(t))&=\mathcal{I}^{\beta}(\mathcal{I}^{\alpha}g(t))=\mathcal{I}^{\alpha +\beta}g(t),\\
\mathcal{I}^\alpha(D^\alpha g(t))&=g(t) - \sum\limits_{k = 0}^{r- 1} \frac{t^{k}}{k!}g^{(k)}(0^{+}),\\
D^{\alpha}(D^{r}g(t))&=D^{r}(D^{\alpha}g(t))=D^{r+\alpha}g(t).
\end{split}
\end{equation*}
\end{lemma}

\subsection{Properties of MHFs}
%\subsection{Definitions of modified hat functions}
Hat functions, sometimes called tent or triangular functions, are continuous functions defined on the interval $ [0,T] $ as \cite{tripathi}
\begin{equation}
\psi _{0}(t)=
\begin{cases}\nonumber
1-\frac{t}{h},\quad\quad t\in [0,h] ,\qquad\qquad\quad\\
\\
0,~~\quad \quad \quad  otherwise,\qquad\qquad\qquad\quad\qquad\qquad\qquad\qquad\quad\quad
\end{cases}
\end{equation}
\begin{equation}
\psi _{i}(t)=
\begin{cases}\nonumber
\frac{t}{h}-(i-1), \quad\quad  t\in [ (i-1)h, ih],\qquad\quad\\
\\
(i+1) -\frac{t}{h},\quad \quad  t\in [ ih, (i+1)h] ,\qquad\quad i=1,2,\dots ,n-1,\\
\\
0,\quad \quad \qquad\quad\qquad otherwise,\qquad\qquad\qquad\qquad\quad
\end{cases}
\end{equation}
\begin{equation}
\psi _{n}(t)=
\begin{cases}\nonumber
\frac{t-T}{h}+1,\quad\qquad t\in [T-h, T],\quad\qquad\quad\\
\\
0,~\quad \quad \quad\quad \quad \quad otherwise,\qquad\qquad\qquad\qquad\qquad\qquad\qquad
\end{cases}
\end{equation}
where $h=\frac{T}{n}$.

Now, let $n\geq 2$ be an even integer and $h=\frac{T}{n}$. MHFs are a modification of hat functions and are defined  as follows \cite{Nemati}:
\begin{equation}
\theta _{0}(t)=
\begin{cases}\nonumber
\frac{1}{2h ^{2}}(t-h)(t-2h),\quad\quad t\in [0, 2h],\qquad\qquad\qquad\qquad\quad\\
\\
0,~\quad \quad \quad \quad \quad \quad \quad \quad \quad otherwise,\qquad\qquad\qquad\qquad\quad\quad
\end{cases}
\end{equation}
when $i$ is odd and $1\leq i \leq n-1$,
\begin{equation}
\theta_{i}(t)=
\begin{cases}\nonumber
\frac{-1}{h ^{2}}(t-(i-1)h)(t-(i+1)h),\quad\quad t\in [(i-1)h, (i+1)h],\\
\\
0,\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \qquad \qquad \quad \quad \quad otherwise,
\end{cases}
\end{equation}
when $i$ is even and $2\leq i \leq n-2$,
\begin{equation}\nonumber
\theta _{i}(t)=
\begin{cases}
\frac{1}{2h ^{2}}(t-(i-1)h)(t-(i-2)h),\quad\quad t\in [(i-2)h, ih ],\\
\\
\frac{1}{2h ^{2}}(t-(i+1)h)(t-(i+2)h),\quad\quad t\in [ih, (i+2)h ],\\
\\
0,\quad \quad \quad \quad \quad \quad \quad  \quad\quad  \quad\quad\quad\qquad\qquad otherwise,\qquad\quad
\end{cases}
\end{equation}
and
\begin{equation}\nonumber
\theta _{n}(t)=
\begin{cases}
\frac{1}{2h ^{2}}(t-(T-h))(t-(T-2h)),\quad\quad t\in [T-2h, T],\\
\\
0,\quad \quad \quad \quad \quad \quad \quad \quad \qquad\quad \quad \quad\quad\quad\quad otherwise.\qquad\qquad
\end{cases}
\end{equation}
According to the definition of MHFs, we have
\begin{equation}\label{eq4}
\quad \quad\theta _{i}(jh)=
\begin{cases}
1,\quad\quad i=j,\qquad\qquad\qquad\qquad\qquad\\
\\
0,\quad\quad i\neq j,\qquad\qquad\qquad\qquad
\end{cases}
\end{equation}
\begin{equation*}\label{eq6}
\theta _{i}(t)\theta_{j}(t)=
\begin{cases}
0,\quad\quad \text { \textit{i} is even and}~\vert i-j\vert\geq 3,\\
\\
0,\quad\quad \text{ \textit{i} is odd and}~\vert i-j \vert \geq 2.
\end{cases}
\end{equation*}
and
\begin{equation*}\label{eq5}
\sum _{i=0}^{n}\theta _{i}(t)=1.\qquad\qquad
\end{equation*}
%\subsection{Function expansion with modified hat functions}

An arbitrary function $ g(t)\in L^{2}(\Omega )$ can be expanded in terms of MHFs as
\begin{equation}\label{eq11}
g(t)\simeq g_n(t)= \sum_{i=0}^{n}c_{i}\theta _{i}(t)= \mathbf{C}^{T} \Theta (t)=\Theta^{T}(t)\mathbf{C},
\end{equation}
where $\Theta (t) $ is a $(n+1)$-vector of MHFs  as
\begin{equation}\label{eq7}
\Theta (t)=[\theta_{0}(t) ,\dots,\theta _{i}(t), \dots , \theta _{n}(t)]^{T},
\end{equation}
and
\begin{equation}\nonumber
 \mathbf{C}=[c_{0},\dots ,c_{i},\dots ,c_{n}]^{T},
\end{equation}
in which $ c_{i}=g(ih),~ i=0,1,\dots ,n $.


Let $k(t,s)$ be an arbitrary function of two variables, defined on $L^{2}(\Omega \times \Omega)$. Similarly, it can be expanded in terms of MHFs  as
\begin{equation}\label{eq12}
k(t,s)\simeq k_{n}(t,s)=\Theta^{T}(t)\mathbf{K}\Theta (s)=\Theta ^{T}(s)\mathbf{K}^{T}\Theta (t),
\end{equation}
where
\[{\mathbf{K}} = \left( {\begin{array}{*{20}{c}}
{{\mathtt{k}_{0,0}}}& \cdots &{{\mathtt{k}_{0,n}}}\\
 \vdots &  \ddots &\vdots  \\
{{\mathtt{k}_{n,0}}}& \cdots &{{\mathtt{k}_{n,n}}}
\end{array}} \right)_{(n+1) \times (n+1)},\]
in which ${\mathtt{k}_{i,j}}=k(ih,jh),~ i,j=0,1,\dots ,n $.  Also,  $ \Theta (s) $ and $ \Theta (t)$ are MHFs vectors of dimension $ (n+1) $. \\
%Let

According to Eq. (\ref{eq4}), if $\Theta (t)\Theta^{T}(t)$ is expanded by MHFs, we get
%By expanding components of $V(t)V^{T}(t)$  based on MHFs and  according  Eq. (\ref{eq4}), we get
\begin{equation}\label{eq8}
\Theta (t)\Theta^{T}(t)\simeq \text{diag}(\Theta(t)),\qquad
\end{equation}
thus, we have
\begin{equation}\label{eq10}
\Theta (t)\Theta^{T}(t)	\mathbf{Q}\simeq \tilde{\mathbf{Q}}\Theta (t),\qquad\quad
 \end{equation}
 where $ \mathbf{Q} $ is a $ (n+1) $-vector and $ \tilde{\mathbf{Q}} $ is a diagonal matrix with the elements of $\mathbf{Q}$. Also, if $ \mathbf{G}$ is a $ (n+1)\times (n+1) $ matrix, we obtain
  \begin{equation}\label{eq9}
\Theta ^{T}(t)\mathbf{G} \Theta (t)\simeq \hat{\mathbf{G}}^{T} \Theta (t),\qquad\qquad\qquad
 \end{equation}
 where $\hat{\mathbf{G}}$ is a $(n+1)$-vector with components equal to the diagonal entries of the matrix $ \mathbf{G} $.

 % \subsection{Operational matrix of modified hat functions}
Now, we review some operational matrices based on MHFs that will be used in our proposed method.
%\subsection{Integer order operational matrix of intgeration}
Let $\Theta (t)$ be the MHFs vector defined in Eq. $(\ref{eq7})$. Hence the Volterra integral of $\Theta (t)$ can be estimated as  follows:
\begin{equation}\label{eq16}
\int _{0}^{t}\Theta (s)\mathrm{d}{s} \simeq \mathcal{L}\Theta (t),\qquad
\end{equation}
in which $\mathcal{L}$ is the $(n+1)\times (n+1)$ operational matrix of integration for MHFs defined as \cite{Momenzade}
\begin{equation}\nonumber
\mathcal{L}=
\begin{pmatrix}
0& \rho_{1}& \rho_{2}& \rho_{2}& \rho_{2}&  \dots &\rho_{2} &\rho_{2}&\rho_{2}&\rho_{2}\\
0& \rho_{3}&  \rho_{4}&  \rho_{4}&  \rho_{4} & \dots & \rho_{4} & \rho_{4}& \rho_{4}& \rho_{4} \\
0&  \rho_{5} &\rho_{2} &\rho_{6} &\rho_{3} & \dots &\rho_{3} & \rho_{3}& \rho_{3}&\rho_{3}\\
&\quad &\ddots &\ddots &\ddots &\ddots &\ddots &\quad &\quad \\
&\quad &\quad &\ddots &\ddots &\ddots &\ddots &\ddots &\quad \\
&\quad &\quad &\quad &\ddots &\ddots &\ddots &\ddots &\ddots \\
%0& 0& 8& 16& 16&  \dots & 16 & 16\\
%\vdots &\vdots & \vdots &\vdots & \vdots  &\ddots & \vdots & \vdots\\
0&0&0&0&0 &\dots & \rho_{5}&\rho_{2} &\rho_{6} &\rho_{3}\\
0&0&0&0&0 &  \dots &0&0& \rho_{3} & \rho_{4}\\
0&0&0&0&0&\dots &0&0&  \rho_{5} &\rho_{2}
\end{pmatrix},
\end{equation}
where $ \rho_{1}=\frac{5h}{12}$,  $ \rho_{2}=\frac{h}{3}$,  $ \rho_{3}=\frac{2h}{3}$,  $ \rho_{4}=\frac{4h}{3}$,  $  \rho_{5}=-\frac{h}{12}$ and $  \rho_{6}=\frac{3h}{4}$.

%\subsection{Fractional order Operational Matrix of Integration}
\begin{theorem}
Let $\Theta (t)$ be the MHFs vector given by Eq. (\ref{eq7}), then
\begin{equation}\label{eq18}
\mathcal{I}^{\alpha_{i}}\Theta (t)\simeq \mathcal{L}^{\alpha _{i}}\Theta (t),\quad
\end{equation}
where $\mathcal{L}^{\alpha _{i}}$ is the $(n+1)\times (n+1)$ operational matrix of fractional-order integration
%of order $ \alpha_{i} $ of $ \Theta (t) $ which is defined as
\begin{equation}\nonumber
\mathcal{L}^{\alpha _{i}}=
\begin{pmatrix}
0& \phi^{i}_{1} &\phi_{2}^{i} & \phi_{3}^{i}&\phi _{4}^{i}&\dots & \phi_{n-1}^{i} &\phi_ {n}^{i}\\
0& \vartheta^{i} _{0} &\vartheta^{i} _{1}& \vartheta^{i}_{2}&\vartheta^{i} _{3}& \dots & \vartheta^{i}_{n-2}&\vartheta^{i}_{n-1}\\
0& \eta^{i} _{-1}&\eta^{i} _{0}&\eta^{i}_{1}&\eta^{i}_{2}&\dots  & \eta^{i} _{n-3}&\eta^{i}_{n-2}\\
0&0&0&\vartheta^{i}_{0}&\vartheta^{i}_{1}&\dots &\vartheta^{i}_{n-4}&\vartheta^{i}_{n-3}\\
0&0&0&\eta^{i}_{-1}&\eta^{i} _{0}&\dots &\eta^{i} _{n-5}&\eta^{i} _{n-4}\\
 \vdots &\vdots &\vdots &\vdots & \vdots &\quad  & \vdots & \vdots \\
 0&0&0&0&0&\dots & \vartheta^{i} _{0} &\vartheta^{i} _{1}\\
 0&0&0&0&0&\dots  & \eta^{i} _{-1}&\eta^{i} _{0}
\end{pmatrix},
\end{equation}
in which
\begin{eqnarray*}\nonumber
\phi^{i} _{1}&=&  \frac{h ^{\alpha _{i}}\alpha_{i}(3+2\alpha_{i})}{2\Gamma (\alpha _{i} +3)}, \qquad \vartheta^{i} _{0}= \frac{2 h ^{\alpha _{i}}(1+\alpha_{i}) }{\Gamma (\alpha _{i} +3)},\\
%\end{equation}
%\[
\eta _{-1}^{i}&=&-\frac{h ^{\alpha _{i}}\alpha_{i}}{2\Gamma (\alpha _{i} +3)}  ,\qquad\quad \eta^{i} _{0}= \frac{h ^{\alpha _{i}}2^{\alpha_{i} +1}(2-\alpha_{i})}{2\Gamma (\alpha _{i} +3)},\\
%\]
%\[
\eta^{i}_{1}&=&\frac{h ^{\alpha _{i}}}{2\Gamma (\alpha _{i} +3)} \Big (  3^{\alpha _{i}+1}(4-\alpha _{i})-6(2+\alpha_{i})  \Big ),\\
%\]
%\begin{eqnarray}\nonumber
\phi _{k}^{i}&=&\frac{h ^{\alpha _{i}}}{2\Gamma (\alpha _{i} +3)} \Big (k^{\alpha_{i} +1}(2k-6-3\alpha_{i})+2k^{\alpha_{i}} (1+\alpha_{i} )(2+\alpha_{i}) \qquad\\\nonumber
&-&(k-2)^{\alpha_{i} +1}(2k-2+\alpha_{i})\Big ), \qquad k=2,3,\dots , n,\\
%\end{eqnarray}
%\begin{eqnarray}\nonumber
\quad\vartheta _{k}^{i}&=&\frac{2h ^{\alpha _{i}}}{\Gamma (\alpha _{i} +3)} \Big (  (k-1)^{\alpha_{i} +1}(k+1+\alpha_{i} )\qquad\qquad\qquad\qquad\qquad\qquad\\\nonumber
&-&(k+1)^{\alpha _{i}+1}(k-1-\alpha_{i} )\Big ), \qquad k=1,2, \dots , n-1,
\end{eqnarray*}
and
\begin{eqnarray}\nonumber
\eta ^{i}_{k}=\frac{h ^{\alpha _{i}}}{2\Gamma (\alpha _{i} +3)} \Big ((k+2)^{\alpha _{i}+1}(2k+2-\alpha_{i})-6k^{\alpha _{i}+1}(2+\alpha_{i})\qquad\\\nonumber
-(k-2)^{\alpha_{i} +1}(2k-2+\alpha_{i})\Big ), \qquad  k= 2,3,\dots , n-2.
\end{eqnarray}
\end{theorem}
\begin{proof}
See \cite{Nemati}.
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{theorem}
Let $\Theta (t)$ be the MHFs vector given by Eq. (\ref{eq7}), then
\begin{equation}\label{eq26}
\int _{0}^{t}\Theta (s)\mathrm{d}{\mathcal{B}(s)} \simeq\mathcal{L}_{s}\Theta (t),\qquad
 \end{equation}
where $ \mathcal{L}_{s}$ is the operational matrix of stochastic integration of MHFs as
{\tiny
 \begin{equation}\nonumber
 \mathcal{L}_{s}=
 \begin{pmatrix}
0& \xi_{1}& \xi_{2}&\xi_{2}&\xi_{2}&\dots & \xi _{2}&\xi _{2}&\xi _{2}\\
0&\mathcal{B}(h)+\beta_{1,1}&\beta _{2,1}&\beta _{2,1}&\beta _{2,1}&\dots &\beta _{2,1}&\beta _{2,1}&\beta _{2,1}\\
0&\gamma _{1,2}&\mathcal{B}(2h)+\gamma _{2,2}&\gamma _{3,2}&\gamma _{4,2}&\dots &\gamma _ {4,2}&\gamma_ {4,2}&\gamma_ {4,2}\\
&\ddots &\ddots &\ddots &\ddots &\quad &\ddots &\ddots &\ddots \\
0&0&0&0&0&\dots & \mathcal{B}(T-2h)+\gamma _{2,n-2}&\gamma _{3,n-2}&\gamma _{4,n-2}\\
0&0&0&0&0&\dots &0& \mathcal{B}(T-h)+\beta_{1,n-1} &\beta_{2,n-1}\\
0&0&0&0&0&\dots &0& \gamma _{1,n} & \mathcal{B}(T)+\gamma _{2,n}
 \end{pmatrix},
 \end{equation}
}
 in which
 \begin{equation}\begin{split}\label{eq28}\nonumber
\xi _{1}=& \int_{0}^{h}\frac{-1}{2h^{2}}(2s-3h)~\mathcal{B}(s)\mathrm{d}{s},\\ \nonumber
\quad \xi _{2}=& \int_{0}^{2h}\frac{-1}{2h^{2}}(2s-3h)~\mathcal{B}(s)\mathrm{d}{s},\\ \nonumber
\beta_{1,i}=&\int _{(i-1)h}^{ih}\frac{2}{h^{2}} (s-ih)~\mathcal{B}(s)\mathrm{d}{s},\qquad i=1,3,\dots , n-1,\\ \nonumber
\quad \beta_{2,i}=&\int_{(i-1)h}^{(i+1)h}\frac{2}{h^{2}}(s-ih)~\mathcal{B}(s)\mathrm{d}{s},\qquad i=1,3,\dots , n-1,\\ \nonumber
\gamma_{1,i}=& \int_{(i-2)h}^{(i-1)h}\frac{-1}{2h^{2}}(2s-(2i-3)h)~\mathcal{B}(s)\mathrm{d}{s},\qquad i=2,4,\dots , n-2, \\ \nonumber
\gamma_{2,i}=&\int_{(i-2)h}^{ih}\frac{-1}{2h^{2}}(2s-(2i-3)h)~\mathcal{B}(s)\mathrm{d}{s},~ \qquad i=2,4,\dots , n-2, \\ \nonumber
\gamma_{3,i}=& \int _{(i-2)h}^{ih}\frac{-1}{2h^{2}}(2s-(2i-3)h)~\mathcal{B}(s)\mathrm{d}{s}\\
+&\int _{ih}^{(i+1)h}\frac{-1}{2h^{2}}(2s-(2i+3)h)~\mathcal{B}(s)\mathrm{d}{s}, \qquad i=2,4,\dots , n-2,\\
\gamma_{4,i}= &\int _{(i-2)h}^{ih}\frac{-1}{2h^{2}}(2s-(2i-3)h)~\mathcal{B}(s)\mathrm{d}{s}\\
+&\int _{ih}^{(i+2)h}\frac{-1}{2h^{2}}(2s-(2i+3)h)~\mathcal{B}(s)\mathrm{d}{s}, \qquad i=2,4,\dots , n-2.
\end{split}\end{equation}
\end{theorem}
\begin{proof}
See \cite{Momenzade}.
\end{proof}
%The title of your section 2
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Description of the method}
In this section, by using the operational matrices based on MHFs and other concepts defined in the previous section, a numerical method will be proposed to solve the problem (\ref{eq1})-(\ref{eq2}). First, by Riemann–Liouville fractional integration of the equations of (\ref{eq1}), we get
\begin{eqnarray}
y_{i}(t)=d_{i}+\mathcal{I}^{\alpha _{i}}(f_{i}(t))+\mu_{i}( \mathcal{I}^{\alpha_{i}}y_{i}(t))+\sum _{j=1}^{m}\mathcal{I}^{\alpha _{i}}(\int _{0}^{t}k_{ij}(t,s)y_{j}(s)\mathrm{d}{s})\nonumber\\
+\sigma \sum _{j=1}^{m}\mathcal{I}^{\alpha _{i}}( \int _{0}^{t}\hat{k}_{ij}(t,s)y_{j}(s)\mathrm{d}{\mathcal{B}(s)}),\qquad i=1,...,m. \label{eq35}
\end{eqnarray}

Now, we approximate the functions $y_{i}(t)$, $ d_{i}$, $f_{i}(t)$, $k_{ij}(t,s)$, $\hat{k}_{ij}(t,s)$, for  \break $ i,j=1,2,\dots, m $ in terms of MHFs as
\begin{equation}\begin{split}\nonumber
y_{i}(t)\simeq &\mathbf{C}_{i}^{T}\Theta (t)=\Theta^{T}(t)\mathbf{C}_{i},\quad\quad\quad\quad\quad\quad\quad\quad\\ \nonumber
d_{i}\simeq & \mathbf{d}_{i}^{T}\Theta (t)= \Theta^{T}(t)\mathbf{d}_{i},\quad\quad\qquad\qquad\qquad\quad\\ \nonumber
f_{i}(t)\simeq &\mathbf{f}_{i}^{T}\Theta (t)=\Theta^{T}(t)\mathbf{f}_{i},\qquad\qquad\qquad\quad\quad\\ \nonumber
k_{ij}(t,s)\simeq &\Theta ^{T}(t)\mathbf{K}_{ij}\Theta (s)=\Theta^{T}(s)\mathbf{K}_{ij}^{T}\Theta (t),\\
\hat{k}_{ij}(t,s)\simeq &\Theta ^{T}(t)\hat{\mathbf{K}}_{ij}\Theta (s)=\Theta^{T}(s)\hat{\mathbf{K}}_{ij}^{T}\Theta (t),
\end{split}\end{equation}
%\begin{eqnarray}
%K_{ij}=(k^{ij}_{st}),~~ s,t=0,1,\dots, n, ~~i,j=1,2,\dots ,m,\\ \nonumber
%\hat{K}_{ij}=(\hat{k}^{ij}_{st}),~~~ s,t=0,1,\dots, n,~~i,j=1,2,\dots ,m.\label{eq15}
%\end{eqnarray}
where $\Theta (t)$ is MHFs vector and $ \mathbf{C}_{i}, \mathbf{d}_{i}, \mathbf{f}_{i},$ are MHFs coefficients vectors of $ y_{i}(t)$,  $d_{i}$, and $f_{i}(t)$. Also, $\mathbf{K}_{ij}$ and $\hat{\mathbf{K}}_{ij}$ are  MHFs coefficients matrices of $k_{ij}(t,s)$ and $\hat{k}_{ij}(t,s)$, respectively. Substituting these relations into Eq. (\ref{eq35}) results in
\begin{eqnarray}\nonumber
\mathbf{C}^{T}_{i}\Theta (t)=\mathbf{d}_{i}^{T}\Theta (t)+\mathcal{I}^{\alpha_ {i}}(\mathbf{f}_{i}^{T}\Theta (t))+\mu_{i}\mathcal{I}^{\alpha_ {i}}( \mathbf{C}_{i}^{T}\Theta (t))\qquad\qquad\qquad\quad\\ \nonumber
+\sum _{j=1}^{m}\mathcal{I}^{\alpha_{i}}\left(\Theta^{T}(t)\mathbf{K}_{ij}\int _{0}^{t}\Theta (s)\Theta^{T}(s)\mathbf{C}_{j}\mathrm{d}{s}\right) \qquad\quad~~\\ \label{eq37}
+\sigma \sum_{j=1}^{m}\mathcal{I}^{\alpha_{i}}\left(\Theta^{T}(t)\hat{\mathbf{K}}_{ij}\int _{0}^{t}\Theta (s)\Theta^{T}(s)\mathbf{C}_{j}\mathrm{d}{\mathcal{B}(s)}\right).
\end{eqnarray}
The  integral part of Eq. (\ref{eq37}) can be estimated according to Eqs.  (\ref{eq10})-(\ref{eq26}) as
\begin{eqnarray}\nonumber
\mathcal{I} ^{\alpha_{i}}\left(\Theta^{T}(t)\mathbf{K}_{ij}\int _{0}^{t}\Theta (s)\Theta ^{T}(s)\mathbf{C}_{j}\mathrm{d}{s}\right)\qquad\qquad\qquad\\ \nonumber
=\mathcal{I}^{\alpha _{i}}\left( \Theta^{T}(t)\mathbf{K}_{ij}\int _{0}^{t}\tilde{\mathbf{C}}_{j}\Theta (s)\mathrm{d}{s}\right)  \\\nonumber
\simeq  \mathcal{I}^{\alpha _{i}}\left(\Theta ^{T}(t)\mathbf{K}_{ij}\tilde{\mathbf{C}}_{j}\mathcal{L}\Theta (t)\right),\qquad\\ \label{eq46}
%=I^{\alpha _{i}}\left( (\widehat{ K_{ij}\tilde{Y}_{j}P})^{T}V(t)\right)=\left( \widehat{K_{ij}\tilde{Y}_{j}P}\right) ^{T}P^{\alpha _{i}}V(t).\qquad\qquad\quad ~
=\mathcal{I}^{\alpha _{i}}\left(\hat{ \mathcal{A}}_{j}^{T}\Theta (t)\right)\simeq  \hat{ \mathcal{A}}_{j}^{T}\mathcal{L}^{\alpha _{i}}\Theta (t),\quad
\end{eqnarray}
and for stochastic integral, we have
\begin{eqnarray}\nonumber
 \mathcal{I}^{\alpha_{i}}\left(\Theta ^{T}(t)\hat{\mathbf{K}}_{ij}\int _{0}^{t}\Theta (s)\Theta ^{T}(s)\mathbf{C}_{j}\mathrm{d}{\mathcal{B}(s)}\right) \qquad\qquad\qquad\quad\\ \nonumber
=\mathcal{I}^{\alpha _{i}}\left(\Theta ^{T}(t)\hat{\mathbf{K}}_{ij}\int _{0}^{t}\tilde{\mathbf{C}}_{j}\Theta (s)\mathrm{d}{\mathcal{B}(s)}\right)\quad\\\nonumber
 \simeq  \mathcal{I}^{\alpha _{i}}\left( \Theta^{T}(t)\hat{\mathbf{K}}_{ij}\tilde{\mathbf{C}}_{j}\mathcal{L}_{s}\Theta (t)\right),\quad\qquad\quad\\ \label{eq47}
%=I^{\alpha _{i}}\left( (\widehat{\hat{K}_{ij}\tilde{Y}_{j}P_{s}})^{T}V(t)\right)= \left( \widehat{\hat{K}_{ij}\tilde{Y}_{j}P_{s}}\right) ^{T}P^{\alpha _{i}}V(t),\qquad\qquad\qquad ~~
=\mathcal{I}^{\alpha _{i}}\left( \hat{\mathcal{S}}_{j}^{T}\Theta (t)\right)\simeq \hat{\mathcal{S}}_{j}^{T}\mathcal{L}^{\alpha _{i}}\Theta (t),~\quad\qquad
\end{eqnarray}
where $\tilde{\mathbf{C}}_{j}=\text{diag}(\mathbf{C}_{j})$. Also, $ \hat{\mathcal{A}}_{j}$ and $\hat{\mathcal{S}}_{j}$ are $ (n+1) $-vectors and their elements consist of diagonal entries of matrices $\mathcal{A}_{j}=\mathbf{K}_{ij}\tilde{\mathbf{C}}_{j}\mathcal{L}  $ and $\mathcal{S}_{j}= \hat{\mathbf{K}}_{ij}\tilde{\mathbf{C}}_{j}\mathcal{L}_{s} $. By using Eqs. (\ref{eq46}) and (\ref{eq47}), for $ i=1,2,\dots,m$, we get
\begin{equation}\label{eq480}
\mathbf{C}_{i}^{T}=\bar{\mathbf{T}}_{i}^{T}+\mu_{i}\mathbf{C}_{i}^{T}\mathcal{L}^{\alpha_{i}}+ \sum_{j=1}^{m}\left( \hat{\mathcal{A}}_{j}^{T}+\sigma \hat{\mathcal{S}}_{j}^{T}\right) \mathcal{L}^{\alpha_{i}} ,
%i=1,\dots ,m.\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\quad\nonumber
\end{equation}
 where
%Rewriting (\ref{eq48}) yields the following system:
%\begin{equation}\label{eq49}
%AY_{i}^{T}-\sum_{j=1}^{m} B_{ij} .Y_{j}=\bar{F}_{i}^{T},\qquad i=1,\dots,m,
%(I-\mu)Y_{i}^{T}-\sum _{j=1}^{n}\left[ \left( K_{ij}P\right)^{T}+\sigma \left( \hat{K}_{ij}P_{s}\right) %^{T}\right].P^{\alpha_{i}}Y_{j}=\bar{F}_{i}^{T},
\begin{equation}\nonumber
%A&=&(1-\mu_i)I, \\ \nonumber
%B_{ij}&=&\left[ \left( K_{ij}P\right)^{T}+\sigma \left( \hat{K}_{ij}P_{s}\right)^{T}\right]P^{\alpha_{i}},\\  \nonumber
\bar{\mathbf{T}}_{i}^{T}=\mathbf{d}_{i}^{T}+\mathbf{f}_{i}^{T}\mathcal{L}^{\alpha_{i}}. \nonumber
\end{equation}

For each $  i=1,2,\dots, m $, the relation  (\ref{eq480}) is a linear system of  $ (n+1) $ algebraic equations in the $ (n+1) $ unknown entries of the coefficient vector $ \mathbf{C}_{i} $.
% can provide the unknown entries of the coefficient vector $ \mathbf{C}_{i} $.
 Solving this system leads to an approximate solution for the problem (\ref{eq1})-(\ref{eq2}), in the form
\begin{equation}
y_{i}(t)\simeq \mathbf{C}_{i}^{T}\Theta (t),\qquad i=1,2,\dots ,m.
\end{equation}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Error estimate}
In this section, an error estimate of the proposed algorithm is discussed. Here, we consider the norm
\begin{equation}
 \Vert g \Vert=\mathbb{E}[\sup_{t\in \Omega}\vert g(t)\vert],
\end{equation}
%for any continuous function $g(t)$, 
where $\mathbb{E}[.]$ is the mathematical expectation.

\begin{theorem}(\cite{Mirzaee2})\label{theo1}
Suppose that $g(t)\in C^{3}(\Omega )$ and let $g_{n}(t)$ be the MHFs expansion of $g(t)$ as defined in Eq. (\ref{eq11}). Then, we have
\begin{equation}
\Vert g(t)-g_{n}(t)\Vert\simeq O(h ^{3}).
\end{equation}
\end{theorem}

\begin{theorem}(\cite{Mirzaee2})\label{theo2}
Suppose $k(t,s)\in C^{3}(\Omega\times\Omega )$ and $k_{n}(t,s)$ is the MHFs expansion of $k(t,s)$ as defined in Eq. (\ref{eq12}). Then
\begin{equation}
\Vert k(t,s)-k_{n}(t,s)\Vert \simeq O(h^{3}).
\end{equation}
\end{theorem}

\begin{theorem}
Let  $\mathbf{Y}_{n}(t)$ be the numerical solution of (\ref{eq1}) obtained by the proposed method, $\mathbf{Y}(t)$ be its exact solution and
 $ \mathbf{R}_{n}(t) $ be the residual error for this numerical solution.
Also, assume that $ \Vert \mathbf{Y}(t)\Vert\leq \lambda  $, $ \Vert \mathbf{k}(t,s)\Vert\leq\delta $ and $  \Vert \mathbf{\hat{k}}(t,s)\Vert\leq\hat{\delta} $, in which $ \lambda$, $ \delta $ , $\hat{\delta}   $
%and also $\gamma, \bar{\mu}, T, c_{3}, \sigma, \bar{\eta}, c_{4} $
are real positive constants. Then, $\Vert \mathbf{R}_{n}(t)\Vert  $ tends to zero, when $ n\rightarrow \infty $.
%when $n\longrightarrow \infty  $.
\end{theorem}

\begin{proof}
 From (\ref{eq35}) subject to (\ref{eq2}), we can rewrite the system (\ref{eq1}) in the following form
\begin{eqnarray*}\nonumber
\mathbf{Y}(t)=\Lambda_{\text{int}}+\int_{0}^{t}\varpi (t,s)\mathbf{F}(s)\mathrm{d}{s}+\mu\int_{0}^{t}\varpi (t,s)\mathbf{Y}(s)\mathrm{d}{s}\qquad\qquad\qquad \\\nonumber
+\int _{0}^{t}\varpi (t,s)\left( \int_{0}^{s}\mathbf{k}(s,r)\mathbf{Y}(r)\mathrm{d}{r}\right)  \mathrm{d}{s}\qquad\qquad\qquad\qquad\\\label{eq52}
+\sigma\int_{0}^{t}\varpi (t,s) \left( \int_{0}^{s}\mathbf{\hat{k}}(r,s)\mathbf{Y}(r)\mathrm{d}{\mathcal{B}(r)}\right) \mathrm{d}{s}\qquad\qquad
\end{eqnarray*}
where
\begin{equation*}\label{54}
\varpi ( t,s)= \text{diag} \left(  \frac{(t-s)^{\alpha_{1}-1}}{\Gamma (\alpha _{1})} , \dots ,  \frac{(t-s)^{\alpha_{i}-1}}{\Gamma (\alpha _{i})}, \dots ,  \frac{(t-s)^{\alpha_{m}-1}}{\Gamma (\alpha _{m})} \right).
\end{equation*}
Also, the numerical solution for system  (\ref{eq35}) satisfies the following matrix form
\begin{eqnarray*}\nonumber
\mathbf{Y}_{n}(t)=\Lambda_{\text{int}}^{n}+\int_{0}^{t}\varpi (t,s)\mathbf{F}_{n}(s)\mathrm{d}{s}+\mu\int_{0}^{t}\varpi (t,s)\mathbf{Y}_{n}(s)\mathrm{d}{s}\qquad\qquad\qquad \\\nonumber
+\int _{0}^{t}\varpi (t,s)\left( \int_{0}^{s}\mathbf{k}_{n}(s,r)\mathbf{Y}_{n}(r)\mathrm{d}{r}\right)  \mathrm{d}{s}\qquad\qquad\qquad\qquad\\\label{eq55}
+\sigma\int_{0}^{t}\varpi (t,s) \left( \int_{0}^{s}\mathbf{\hat{k}}_{n}(s,r)\mathbf{Y}_{n}(r)\mathrm{d}{\mathcal{B}(r)}\right) \mathrm{d}{s}+ \mathbf{R}_{n}(t).
\end{eqnarray*}
The residual function $  \mathbf{R}_{n}(t) $  can be obtained from the following relation
%Subtracting Eq. (\ref{eq52}) from Eq. (\ref{eq55}) yields
\begin{equation}\label{eq57}
 \mathbf{R}_{n}(t) = - \textbf{e}_{n}(t) + \textbf{e}_{int}+\textbf{e} [\mathbf{F}](t)+\textbf{e} [\mathbf{Y}](t)+\textbf{e} [\mathbf{\mathbf{k}}](t)+\textbf{e} [\mathbf{\mathbf{\hat{k}}}](t),
\end{equation}
in which
\begin{eqnarray}
 \textbf{e}_{n}(t)=\mathbf{Y}(t)- \mathbf{Y}_{n}(t), \quad \textbf{e}_{int}=\Lambda_{\text{int}}-\Lambda_{\text{int}}^{n},\qquad  \qquad  \qquad  \qquad  \nonumber \\
\textbf{e} [\mathbf{F}](t)= \int_{0}^{t}\varpi (t,s) \left[ \mathbf{F}(s)-\mathbf{F}_{n}(s)\right] \mathrm{d}{s}, \quad \textbf{e} [\mathbf{Y}](t)=\mu \int_{0}^{t}\varpi (t,s)  \textbf{e}_{n}(s) \mathrm{d}{s},\qquad~~~~~ \nonumber \\ \label{eq81}
\textbf{e} [\mathbf{\mathbf{k}}](t)=\int _{0}^{t}\varpi (t,s)\mathcal{J}_{1}(s)\mathrm{d}{s},\qquad\quad \textbf{e} [\mathbf{\mathbf{\hat{k}}}](t)=\sigma \int _{0}^{t}\varpi (t,s)\mathcal{J}_{2}(s)\mathrm{d}{s}.\qquad \qquad\qquad~ \nonumber
\end{eqnarray}
%in the relation (\ref{eq81}), $ \mathcal{J}_{1}(t) $ and  $ \mathcal{J}_{2}(t) $ are defined as
where
\begin{equation}\label{eq72}
 \mathcal{J}_{1}(t)=\int _{0}^{t} \left( \mathbf{k}(t,s)\mathbf{Y}(s)-\mathbf{k}_{n}(t,s)\mathbf{Y}_{n}(s)\right)  \mathrm{d}{s},
\end{equation}
%and in realtion (\ref{eq81}) $ J_{2}(t) $ is defined
and
\begin{equation}\label{eq73}
 \mathcal{J}_{2}(t) = \int _{0}^{t}\left( \mathbf{\hat{k}}(t,s)\mathbf{Y}(s)-\mathbf{\hat{k}}_{n}(t,s)\mathbf{Y}_{n}(s)\right) \mathrm{d}{\mathcal{B}(s)}.
\end{equation}
%Taking mathematical expectation from the both sides of
Taking the norm of both sides of Eq. (\ref{eq57}) yields
\begin{eqnarray}\nonumber
 \Vert   \mathbf{R}_{n}(t)\Vert\leq  \Big ( \Vert \textbf{e}_{int}\Vert +\Vert\textbf{e}_{n}(t)\Vert   +\Vert\textbf{e} [\mathbf{F}](t)\Vert \quad\qquad\qquad\qquad \\\label{eq59}
 +\Vert\textbf{e} [\mathbf{Y}](t)\Vert +\Vert\textbf{e} [\mathbf{\mathbf{k}}](t)\Vert +\Vert\textbf{e} [\mathbf{\mathbf{\hat{k}}}](t)\Vert \Big ).
\end{eqnarray}
Now, using  Theorem \ref{theo1} yields
\begin{eqnarray}\label{eq90}
 \Vert \textbf{e}_{int} \Vert \leq c_{1}h^{3},\\
  \Vert\textbf{e}_{n}(t)\Vert \leq c_{2}h^{3}.
\end{eqnarray}
%Now, let $\gamma=\Vert \varpi (t,s)\Vert$, then by using Theorem \ref{theo1}, we have
\begin{eqnarray}\label{eq60}
 \Vert \textbf{e} [\mathbf{F}](t) \Vert\leq \left(\int_{0}^{t}\Vert \varpi (t,s)\Vert \Vert \mathbf{F}(s)-\mathbf{F}_{n}(s) \Vert \mathrm{d}{s} \right) \nonumber\\
\leq \gamma  \left( \int_{0}^{t} \Vert \mathbf{F}(s)-\mathbf{F}_{n}(s)\Vert \mathrm{d}{s}\right)\qquad\quad\nonumber \\
\leq \gamma T  \Vert \mathbf{F}(s)-\mathbf{F}_{n}(s)\Vert  \leq c_{3}\gamma T h^{3},~\qquad
\end{eqnarray}
where $\gamma=\Vert \varpi (t,s)\Vert _\infty$, and
%Let , then
\begin{eqnarray}\label{eq61}
\Vert \textbf{e} [\mathbf{Y}](t) \Vert\leq \left(\int_{0}^{t}\Vert \varpi (t,s)\Vert \Vert \textbf{e}_{n}(s) \Vert \mathrm{d}{s}\right)~ \nonumber\\
\leq \gamma\bar{\mu}\left( \int_{0}^{t} \Vert \textbf{e}_{n}(s)\Vert \mathrm{d}{s}\right)~~\quad\quad\nonumber \\
\leq \gamma T\bar{\mu}  \Vert \textbf{e}_{n} (t)\Vert \leq   c_{2}\gamma T\bar{\mu} h^{3}, \quad
\end{eqnarray}
where $\bar{\mu}=\Vert \mu\Vert_\infty$.
Moreover, we have
\begin{eqnarray*}\label{eq62}
\Vert \textbf{e} [\mathbf{\mathbf{k}}](t) \Vert\leq \left(\int_{0}^{t}\Vert \varpi (t,s)\Vert \Vert \mathcal{J}_{1}(s) \Vert \mathrm{d}{s}\right) \nonumber\\
\leq \gamma \left( \int_{0}^{t} \Vert \mathcal{J}_{1}(s)\Vert \mathrm{d}{s}\right)\quad\qquad\nonumber \\
\leq \gamma T  \Vert \mathcal{J} _{1}(t)\Vert ,\qquad\qquad\qquad
\end{eqnarray*}
in which by using Theorem \ref{theo2} and Eq. (\ref{eq72}), we get
\begin{eqnarray*} \label{eq63}
  \Vert \mathcal{J}_{1}(t)\Vert \leq  \left( \int _{0}^{t}\Vert \mathbf{k}(t,s)\mathbf{Y}(s)-\mathbf{k}_{n}(t,s)\mathbf{Y}_{n}(s)\Vert  \mathrm{d}{s}\right)~ \quad\qquad\qquad\quad\qquad\quad\nonumber \\
\leq T  \Vert \mathbf{k}(t,s)\mathbf{Y}(s)-\mathbf{k}_{n}(t,s)\mathbf{Y}_{n}(s)\Vert  \quad\quad\qquad\quad\quad\qquad\quad\qquad\qquad\nonumber\\
\leq T  \Big ( \Vert \mathbf{k}(t,s)\Vert \Vert \textbf{e}_{n} (t)\Vert +\Vert \mathbf{k}(t,s)-\mathbf{k}_{n}(t,s)\Vert (\Vert \mathbf{Y}(t)\Vert +\Vert \textbf{e}_{n}(t)\Vert ) \Big )~~\nonumber\\
\leq T ( c_{2} \delta + c_{4} \lambda )  h^{3}+c_{2} c_{4} T h^{6}, \qquad\qquad\qquad\qquad\qquad\qquad\qquad\quad
\end{eqnarray*}
thus
 \begin{equation}\label{eq91}
\Vert \textbf{e} [\mathbf{\mathbf{k}}](t)\Vert \leq \upsilon_{1}  h^{3}+ \upsilon_{2} h^{6},
 \end{equation}
where $ \upsilon_{1}= \gamma T^{2} ( c_{2} \delta + c_{4} \lambda )  $ and $ \upsilon_{2}=\gamma c_{2} c_{4} T^{2} $.

In a similar manner, we have
\begin{eqnarray*}\label{eq92}
\Vert \textbf{e} [\mathbf{\hat{k}}](t) \Vert\leq\sigma \left(\int_{0}^{t}\Vert \varpi (t,s)\Vert \Vert \mathcal{J}_{2}(s) \Vert \mathrm{d}{s}\right) \nonumber\\
\leq \gamma \sigma \left( \int_{0}^{t} \Vert \mathcal{J}_{2}(s)\Vert \mathrm{d}{s}\right)\quad\qquad\nonumber \\
\leq \gamma \sigma  T  \Vert \mathcal{J} _{2}(t)\Vert .\qquad\qquad\qquad\quad
\end{eqnarray*}
By employing Theorem \ref{theo2} and Eq. (\ref{eq73}), we get
\begin{eqnarray*} \label{eq65}
  \Vert  \mathcal{J}_{2}(t)\Vert  \leq  \left( \int _{0}^{t}\Vert \mathbf{\hat{k}}(t,s)\mathbf{Y}(s)-\mathbf{\hat{k}}_{n}(t,s)\mathbf{Y}_{n}(s)\Vert  \mathrm{d}{\mathcal{B}(s)}\right)~ \quad\qquad\qquad\quad\qquad\quad\qquad\nonumber \\
\leq  \Vert \mathcal{B}(t)\Vert   \Vert \mathbf{\hat{k}}(t,s)\mathbf{Y}(s)-\mathbf{\hat{k}}_{n}(t,s)\mathbf{Y}_{n}(s)\Vert  \quad\quad\qquad\quad\quad\qquad\quad\qquad\qquad\quad\nonumber\\
\leq \Vert \mathcal{B}(t)\Vert  \Big ( \Vert \mathbf{\hat{k}}(t,s)\Vert \Vert \textbf{e}_{n} (t)\Vert +\Vert \mathbf{\hat{k}}(t,s)-\mathbf{\hat{k}}_{n}(t,s)\Vert (\Vert \mathbf{Y}(t)\Vert +\Vert \textbf{e}_{n}(t)\Vert ) \Big )\quad~~\nonumber\\
\leq  \Vert \mathcal{B}(t)\Vert  (  c_{2} \hat{\delta} +c_{5} \lambda )  h^{3} +\Vert \mathcal{B}(t)\Vert  c_{2} c_{5} h^{6}, \qquad\qquad\qquad\qquad\qquad\qquad\quad~
\end{eqnarray*}
thus
\begin{equation}\label{eq66}
\Vert \textbf{e} [\mathbf{\hat{k}}](t) \Vert \leq  \upsilon_{3}  h^{3} +\upsilon_{4} h^{6},
\end{equation}
where
\[ \upsilon_{3}= \Vert \mathcal{B}(t)\Vert  \gamma \sigma  T (  c_{2} \hat{\delta} +c_{5} \lambda ), \quad \upsilon_{4}= \Vert \mathcal{B}(t)\Vert \gamma \sigma  c_{2} c_{5} T.\]
Then, from  the relations  (\ref{eq59})-(\ref{eq66}), it can be concluded that
\[ \Vert \mathbf{R}_{n}(t)\Vert \leq \xi  h^{3}+ \hat{\xi} h^{6},\]
where
\[\xi =c_{1} +c_{2} +c_{3}\gamma T  + c_{2}\gamma T\bar{\mu} +\upsilon_{1}+\upsilon_{3},\quad \hat{\xi}=\upsilon_{2} +\upsilon_{4}.\]
Therefore, it is clear that $ \Vert  \mathbf{R}_{n}(t)\Vert  $ tends to zero, when $ h\rightarrow 0 $ (or $ n\rightarrow \infty $).
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Numerical implementation}
To clarify the accuracy and efficiency of the proposed scheme in  the previous sections, some test problems will be considered.
The computations have been executed on a personal computer using a 2.20 GHz
processor and the codes are written in Matlab 2017.
%The proposed algorithm  in  Intel (R) Core(TM) i3-2330 CPU@2.20 GHz.
For testing the numerical solution obtained from the presented algorithm, we run our algorithm for $ \hat{M} $ iterations. Also, the error of numerical solution is computed as
\begin{equation}\nonumber
 \Vert y_{\text{exact}}(t)-y_{\text{num}}(t)\Vert .
\end{equation}
%%%%%%%%%%%%%%%%%%%%%%%%%%

{\bf{Example 1.}} Consider the FSIDE
\begin{equation}\nonumber \label{em1}
D^{\frac{1}{2}}y(t)+y(t)=t^{2}+2\frac{t^{1.5}}{\Gamma (2.5)}+\int _{0}^{t}s d\mathcal{B}(s),~~~ t\in[0,1),
\end{equation}
with the initial condition $y(0)=0$. Here, the exact solution is not available, thus we use the approximate solution with $ n=60$  and $ \hat{M}=700 $ iterations as a replacement of the exact solution.
% also we use numerical algorithm for sufficiently small $\varrho=\frac{1}{n} $.
A comparison between our algorithm and the spectral collocation method in \cite{Taheri} is shown in Table 1. Also, numerical solutions for $\hat{M}=1$ and $ \hat{M}=100 $ iterations are depicted in Figure 1.
\begin{table}
\begin{center}
\caption{\scriptsize Absolute errors of the numerical solution of Example 1.
% for different values of $n$.
%in comparison with spectral collocation method \cite{Taheri}
}\label{tab1}
\begin{small}
\begin{tabular}{ c c c c c }
\hline
$t $  &  Spectral method \cite{Taheri}  & Our algorithm&  Spectral method \cite{Taheri} &Our algorithm   \\
$ ~~ $ & $ n=4  $ & $ n=4$ &$ n=8 $ &$ n=8 $\\
\hline
$ 0.01 $ & $ 0.0008$ &  $ 1.2580\times 10^{-4} $ & $0.0003 $&$ 6.2431\times 10^{-5} $\\
$ 0.08 $ & $ 0.0022 $ & $ 9.0286\times 10^{-4}$ & $ 0.0005 $&$ 3.9007\times 10^{-4} $ \\
$ 0.16 $ & $ 0.0006$ & $ 1.4203\times 10^{-3} $ & $ 0.0018 $ &$ 3.8136\times 10^{-4} $\\
$ 0.30$ & $ 0.0061$ & $ 1.4378\times 10^{-3}$ & $0.0037$ &$ 1.7454\times 10^{-5} $\\
$ 0.43$ & $ 0.0061 $ & $ 2.9194\times 10^{-4}$ & $ 0.0048$&$ 2.0843\times 10^{-5} $ \\
%$ 0.55 $ & $ 0.0030$ & $ 1.0798\times 10^{-3} $ & $ 0.0024$ &$ 6.5754\times 10^{-4} $\\
%$ 0.63$ & $ 0.0012$ & $ 6.2952\times 10^{-4}$ & $~0.0000 $ &$ 9.3164\times 10^{-4} $ \\
$ 0.78$ & $ 0.0019$ & $ 1.3033\times 10^{-3}$ & $0.0007$&$ 2.7080\times 10^{-3} $ \\
$ 0.84$ & $ 0.0035$ & $ 8.4564\times 10^{-4} $ & $ 0.0027$&$ 2.06999\times 10^{-3} $ \\
$ 0.94$ & $ 0.0046 $ & $ 4.8891\times 10^{-4} $ & $0.0035$&$ 2.9305\times 10^{-3} $ \\
$ 1.00$ & $ 0.0016$ & $ 2.3861\times 10^{-3} $ & $ 0.0009$ &$ 6.4688\times 10^{-3} $\\
\hline
\hline
\end{tabular}
\end{small}
\end{center}
\end{table}






\begin{figure}
%\begin{center}
\includegraphics[width=10cm]{fig-exm1}
\caption{{\scriptsize The numerical solution of Example 1  for $\hat{M}=1$ and $ \hat{M}=100 $ iterations.
}}\label{fig1}
%\end{center}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


{\bf{Example 2.}} Consider the system of FSIDEs
\begin{equation}
\begin{cases}
D^{\alpha_{1}}y_{1}(t)=f_{1}(t)+y_{1}(t)+\int _{0}^{t} sy_{2}(s)ds-\int _{0}^{t}y_{2}(s)d\mathcal{B}(s),&~\\ \\ \nonumber
D^{\alpha_{2}}y_{2}(t)=f_{2}(t)-y_{2}(t)+\int _{0}^{t}\frac{s^{2}}{2}y_{2}(s)ds-\frac{1}{2}\int_{0}^{t}y_{1}(s)d\mathcal{B}(s),
\end{cases}
\end{equation}
where
\begin{equation}\begin{split}\nonumber
f_{1}(t)=&\frac{t^{1-\alpha_{1}}}{\Gamma (2-\alpha_{1})}-t-1-\frac{t^{3}}{6}+\frac{1}{2}t\mathcal{B}(t)-\frac{1}{2}\int_{0}^{t}\mathcal{B}(s)ds,\\ \nonumber
f_{2}(t)=&\frac{1}{2}\frac{t^{1-\alpha_{2}}}{\Gamma(2-\alpha_{2})}+\frac{t}{2}-\frac{t^{4}}{16}+\frac{1}{2}t\mathcal{B}(t)+\frac{1}{2}\mathcal{B}(t)-\frac{1}{2}\int_{0}^{t}\mathcal{B}(s)ds.\nonumber
\end{split}\end{equation}
For this system the exact solution is
\begin{equation}\begin{split}
y_{1}(t)=&t+1,\\ \nonumber
y_{2}(t)=&\frac{t}{2}.
\end{split}\end{equation}
%In Table 2, $ L_{\infty}$-error and $c-order$ of Example 2 is reported.
The absolute error of Example 2  for different values of $ n$ is illustrated in Table 2. The exact and numerical solutions when $\alpha_{1}=\alpha_{2}=0.5$, and $n=16$ for $ \hat{M}=100 $ iterations, are shown in Figure 2. The absolute error for $ n=32 $ and $ \hat{M}=500 $ iterations is depicted in Figure 3. Also, in Figure 4, approximate solutions of $ y_{1}(t)$ and $y_{2}(t) $ for different values of $ \sigma$ with $n=16$ and $\hat{M}=100 $ iterations are displayed.


%\begin{table}
%\begin{center}
%\caption{\scriptsize The table of $ L_{\infty}$-error and $ c-order $ of Example 2 for $n=4,8,16 $.}\label{tab1}
%\begin{tabular}{ c c c c c c c}
%\hline
%\hline
%$ n $ & $ L_{\infty} $-error & $c-order  $ &$ L_{\infty} $-error & $c-order  $  \\
%\hline
%$ 4 $ & $ 0.0296$ & $ --$ & $ 0.0134 $ & $ -- $\\
%$ 8 $ & $ 0.0118$ & $ 1.3268$ & $ 0.0046$ & $ 1.5425 $\\
%$ 16 $ & $ 0.0021 $ &$ 2.4903  $ &$0.0010 $ & $ 2.2016  $ &\\
%\hline
%\hline
%\end{tabular}
%\end{center}
%\end{table}




\begin{table}
	\caption{ The absolute error of Example 2 for $n=16 $ and $ n=32 $.}\vspace*{-0.3cm}
	\begin{center}
		\begin{tabular}{cccccc}
			\hline\hline
			{}&\multicolumn{2}{c}{$n=16$}  &\multicolumn{3}{c}{$n=32$}\\
			\cline{2-3}\cline{5-6}
			${t}$&${y_{1}(t)}$&${y_{2}(t)}$&&${y_{1}(t)}$&${y_{2}(t)}$\\
			\hline
			${0.1}$&${6.6629\times10^{-3}}$&${1.2115\times 10^{-3}}$&&${1.4689\times10^{-3}}$&${7.6930\times 10^{-3}}$\\
 ${0.2}$&${ 4.6417 \times10^{-3}}$&${1.5385\times 10 ^{-2}}$&&$1.7993\times10^{-3}$&$7.4140\times 10^{-3}$\\
${0.3}$&${6.4087\times10^{-3}}$&${1.6887\times 10^{-2}}$&&$2.8465\times10^{-3}$&$9.4791\times 10^{-3}$\\
 ${0.4}$&${7.6164\times10^{-3}}$&${1.6728\times 10^{-2}}$&&$4.0157\times10^{-3}$&$1.0582\times 10^{-2}$\\
 ${0.5}$&${1.0276\times10^{-2}}$&${1.8492\times10^{-2}}$&&$5.3916\times10^{-3}$&$1.1561\times 10^{-2}$\\
 ${0.6}$&${1.3725\times10^{-2}}$&${2.0832\times10^{-2}}$&&$7.2755\times10^{-3}$&$1.2592\times 10^{-2}$\\
 ${0.7}$&${1.7016\times10^{-2}}$&${2.3725\times10^{-2}}$&&$9.4031\times10^{-3}$&$1.3189\times 10^{-2}$\\
 ${0.8}$&${2.1748\times10^{-2}}$&${2.5051\times10^{-2}}$&&$1.1836\times10^{-2}$&$1.4717\times 10^{-2}$\\
 ${0.9}$&${2.5247\times10^{-2}}$&${2.1826\times10^{-2}}$&&$1.4597\times10^{-2}$&$1.6389\times 10^{-2}$\\
 ${1.0}$&${3.1081\times10^{-2}}$&${2.7405\times10^{-2}}$&&$1.8378\times10^{-2}$&$1.7842\times 10^{-2}$\\
			\hline\hline
		\end{tabular}
	\end{center}\label{tab2}
\end{table}


\begin{figure}
%\begin{center}
\hspace{-1.4cm}
\includegraphics[width=14cm]{FIG222}
\caption{{\scriptsize The exact  and numerical solution for $ y_{1}(t)$ and $y_{2}(t) $ in Example 2.
% when $\alpha_{1}=0.5 ,\alpha_{2}=0.5$, $n=16$ and $ \hat{M}=100 $.
}}\label{fig2}
%\end{center}
\end{figure}


\begin{figure}
%\begin{center}
\hspace{-1.4cm}
\includegraphics[width=14cm]{fig-exm22}
\caption{{\scriptsize The absolute error of $ y_{1}(t)$ and $y_{2}(t) $ for Example 2 when $\alpha_{1}=\alpha_{2}=0.5$, $n=32$ and $ \hat{M}=500 $.
}}\label{fig2b}
%\end{center}
\end{figure}



\begin{figure}
%\begin{center}
\hspace{-1.4cm}
\includegraphics[width=14cm]{fig-exam2}
\caption{{\scriptsize Approximate solutions of $ y_{1}(t)$ and $y_{2}(t) $ in Example 2 for different values of $ \sigma$ with $n=16$, $\hat{M}=100 $ iterations.
}}\label{fig2c}
%\end{center}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


{\bf{Example 3.}} For the last example, we consider the following system of FSIDEs:
\begin{equation}\label{em2}
\begin{cases}
D^{\alpha_{1}}y_{1}(t)=f_{1}(t)-2y_{1}(t)+\int _{0}^{t}e^{t-s}y_{2}(s)ds+\int _{0}^{t}sty_{3}(s) d\mathcal{B}(s),\\ \\  \nonumber
D^{\alpha_{2}}y_{2}(t)=f_{2}(t)+y_{2}(t)-\int _{0}^{t}ts^{2}y_{3}(s)ds+\int _{0}^{t}\sin(s)y_{2}(s)d\mathcal{B}(s),\\ \\ \nonumber
D^{\alpha _{3}}y_{3}(t)=f_{3}(t)-y_{3}(t)+\int _{0}^{t}(t-s)y_{2}(s)ds+\int _{0}^{t}y_{1}(s)d\mathcal{B}(s),
\end{cases}
\end{equation}
in which
\begin{align}\nonumber
f_{1}(t)=\frac{\Gamma(4)t^{(3-\alpha_{1})}}{\Gamma (4-\alpha_{1})}-\frac{\Gamma(5)t^{(4-\alpha_{1})}}{\Gamma(5-\alpha_{1})}+2t^{3}(1-t)+4-4e^{t}+4t+2t^{2}\qquad \qquad\\ \nonumber
 -t\left( te^{t}\mathcal{B}(t)-\int_{0}^{t}e^{s}(1+s)\mathcal{B}(s)ds\right),\qquad\qquad \qquad\qquad\qquad\qquad\qquad\quad\\  \nonumber
f_{2}(t)=\frac{2\Gamma(3)t^{(2-\alpha_2)}}{\Gamma(3-\alpha_{2})}-2t^{2}-2t+te^{t}\left( 2-2t+t^{2}\right)\qquad\qquad\qquad\qquad\qquad\qquad  \\ \nonumber
 -\left( 2t^{2}\sin(t)\mathcal{B}(t)-2\int_{0}^{t}\left( -2-(s^{2}-2)\cos(s)+2s\sin(s)\right) \mathcal{B}(s)ds\right),\qquad  \\  \nonumber
f_{3}(t)=\frac{e^{t}\Gamma(1-\alpha_{3})-\Gamma(1-\alpha_{3},t)}{\Gamma(1-\alpha_{3})}+e^{t}-\frac{t^{4}}{6}-t^{3}(1-t)\mathcal{B}(t) \qquad\qquad\qquad\quad ~\\ \nonumber
-\int_{0}^{t}(3s^{2}-4s^{3})\mathcal{B}(s)ds,\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\quad
\end{align}
and $ \Gamma(a,z) $ is the incomplete Gamma function. The exact solution of  this system is
\begin{equation}\begin{split}
y_{1}(t)=&t^{3}(1-t),\\  \nonumber
y_{2}(t)=&2t^{2},\\  \nonumber
y_{3}(t)=&e^{t}.
\end{split}\end{equation}
%In this Example, $ L_{\infty}$-error and $ c-order $ is shown in Table 4 .
The numerical results for  $ \alpha_{1}=0.45, \alpha_{2}=0.5, \alpha_{3}=0.7 $, $ n=16$ and $ n=32 $ are reported in Table 3. The exact solution and the numerical solution for \break $ \alpha_{1}=0.24$, $\alpha_{2}=0.45, \alpha_{3}=0.33 $, $ n=16$ and $ \hat{M}=200 $ iterations are depicted in Figure 5. Figure 6 displays
the absolute error of the obtained numerical solution. Also, in Figure 7, the approximate values of $ y_{1}(t)$, $y_{2}(t)$ and $y_{3}(t) $ are illustrated for different values of $ \sigma$ when $n=16$ and $\hat{M}=150 $.

%
%\begin{table}
%\begin{center}
%\caption{\scriptsize The table of $ L_{\infty}$-error and $ c-order $ of Example 3 for $ n=4,8,16 $.}\label{tab1}
%\begin{tabular}{ c c c c c c c}
%\hline
%\hline
%$ n $ & $L_{\infty}$  error& $c-order  $ &$ L_{\infty}$  error&  $c-order  $  &$ L_{\infty} $ error &  $c-order  $  \\
%\hline
%$ 4 $ & $ 0.0215$& $ -- $ & $ 0.0667 $&$ -- $ & $ 0.0128 $ & $ -- $\\
%$ 8 $ & $ 0.0094 $ &$ 1.1936 $& $ 0.0133$ & $ 2.3263 $ & $ 0.0038 $& $ 1.7521$\\
%$ 16 $ & $ 0.0022$ &$ 2.0952 $ &$ 0.0019$ & $ 2.8074 $ &$0.0008 $ & $2.2479 $\\
%
%\hline
%\hline
%\end{tabular}
%\end{center}
%\end{table}



\begin{table}
 \caption{The absolute error of Example 3 for $n=16$ and $ n=32 $.}\vspace*{-0.1cm}
% \begin{center}
\begin{tiny}
 \begin{tabular}{cccccccc}
 \hline\hline
 {$t$}&\multicolumn{3}{c}{   $ n=16$} & &\multicolumn{3}{c}{$ n=32 $}\\
 \cline{2-4}\cline{5-8}
 ${}$& ${y_{1}(t)}$&${y_{2}(t)}$& $ {y_{3}(t)}$& &${y_{1}(t)}$ &${y_{2}(t)}$&${y_{3}(t)}$ \\
 \hline\hline
 ${0.1}$&${1.6212\times10^{-4}}$&${2.8120\times 10^{-4}}$&$9.0697\times10^{-3}$&&${1.0337\times10^{-4}}$&${3.8442\times 10^{-4}}$&$2.0847\times10^{-3}$\\
 ${0.2}$&${ 7.9713 \times10^{-4}}$&${5.0715\times 10 ^{-4}}$&$3.4573\times 10^{-3}$&&$5.9030\times10^{-4}$&$3.0112\times 10^{-4}$&$1.4435\times 10^{-3}$\\
${0.3}$&${2.6256\times10^{-3}}$&${2.2863\times 10^{-3}}$&$2.6693\times 10^{-3}$&&$1.124\times10^{-3}$&$9.4339\times 10^{-3}$&$1.1056\times 10^{-3}$\\
 ${0.4}$&${5.1580\times10^{-3}}$&${5.4707\times 10^{-3}}$&$2.1148\times 10^{-3}$&&$3.3416\times10^{-3}$&$2.9722\times 10^{-3}$&$8.8180\times 10^{-4}$\\
 ${0.5}$&${8.2723\times10^{-3}}$&${1.0649\times10^{-2}}$&$2.0397\times10^{-3}$&&$4.9457\times10^{-3}$&$5.4758\times 10^{-3}$&$9.0121\times 10 ^{-4}$\\
 ${0.6}$&${1.1210\times10^{-2}}$&${1.7771\times 10^{-2}}$&$2.2670\times 10^{-3}$&&$9.1619\times10^{-3}$&$1.1250\times 10^{-2}$&$1.2668\times 10^{-3}$\\
 ${0.7}$&${2.0038\times10^{-2}}$&${3.1139\times 10^{-2}}$&$2.7843 \times 10^{-3}$&&$1.1784\times10^{-2}$&$1.5897\times 10^{-2}$&$1.4935\times 10^{-3}$\\
${0.8}$&${3.4178\times10^{-2}}$&${5.0417\times 10^{-2}}$&$3.1719\times 10^{-3}$&&$1.9795\times10^{-2}$&$2.6911\times 10^{-2}$&$1.8437\times 10^{-3}$\\
${0.9}$&${3.8820\times10^{-2}}$&${7.0928\times 10^{-2}}$&$3.4214\times 10^{-3}$&&$2.5971\times10^{-2}$&$3.8187\times 10^{-2}$&$1.8367\times 10^{-3}$\\
${1.0}$&${5.8870\times10^{-2}}$&${9.2068\times 10^{-2}}$&$3.4609\times 10^{-3}$&&$3.5885\times10^{-2}$&$5.2688\times 10^{-2}$&$1.8613\times 10^{-3}$\\
 \hline
 \end{tabular}
 \end{tiny}
 %\end{center}
 \label{tab3}
\end{table}





\begin{figure*}
%\begin{center}
\hspace{-1.2cm}
\includegraphics[width=14cm]{FIG33}
\caption{{\scriptsize The exact and numerical solution of Example 3 when $\alpha_{1}=0.24, \alpha_{2}=0.45, \alpha_{3}=0.33 $, $n=16$ and $ \hat{M}=200 $.
}}\label{fig3}
%\end{center}
\end{figure*}





\begin{figure}
%\begin{center}
\hspace{-1.2cm}
\includegraphics[width=14cm]{fig-exm32}
\caption{{\scriptsize The absolute error of $ y_{1}(t)$, $y_{2}(t)$, and $y_{3}(t)$ in Example 3 when $\alpha_{1}=0.24, \alpha_{2}=0.45, \alpha_{3}=0.33 $, $n=16$ and $ \hat{M}=500 $.
}}\label{fig3b}
%\end{center}
\end{figure}





\begin{figure}
%\begin{center}
\hspace{-1.2cm}
\includegraphics[width=14cm]{fig-exm3}
\caption{{\scriptsize Approximate solutions of $ y_{1}(t)$, $y_{2}(t)$, and $y_{3}(t)$ in Example 3 for different value of $ \sigma$ with $n=16$ and $\hat{M}=150 $.
}}\label{fig3c}
%\end{center}
\end{figure}



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \section{Conclusion}
 In this work, a numerical approach based on modified hat functions has been used for solving a system of fractional stochastic integro-differential equations. For this purpose, the operational matrices of fractional integral and stochastic integral of  MHFs  have been employed. By using these matrices and the properties of MHFs, the problem is reduced into a linear system of algebraic equations.
 %This system can be solved by an iterative method.
 The great feature of this algorithm is that, without any integration, unknown coefficients have been calculated. Error analysis of the method was discussed. Furthermore, several numerical examples have been investigated to illustrate the effectiveness and accuracy of this algorithm.
 
 
 
 
 
 

% Non-BibTeX users please use
\begin{center}
\begin{thebibliography}{99} % Enter references in alphabetical order and according to the following format.
%
\bibitem{Podlubny}I. Podlubny, {\it Fractional Differential Equations}, Academic Press, New York (1999).

\bibitem{Kilbas} A. A. Kilbas, H. M. Srivastava, and J. J. Trujillo, {\it Theory and Applications of Fractional Differential Equations},  Elsevier, Amsterdam (2006).

\bibitem{diethlm} K. Diethelm, {\it The analysis of fractional differential equations: An application-oriented exposition using differential operators of Caputo type},  Springer-Verlag, Berlin Heidelberg, 2010.

\bibitem{bal11} D. Baleanu, K. Diethelm, E. Scalas, and J. J. Trujillo, {\it Fractional calculus models and numerical methods, Complexity, Nonlinearity and Chaos}, World Scientific, Singapore, 2017.

\bibitem{khalil} H. Khalil and R. A. Khan,  Numerical scheme for solution of coupled system of initial value fractional order Fredholm integro differential equations with smooth solutions, {\it Journal of Mathematical Extension}, 9 (2015), 39--58.


\bibitem{rezapour1} H. Mohammadi, S. Kumar, S. Rezapour, and S. Etemad,   A theoretical study of the Caputo–Fabrizio fractional modeling for hearing loss due to Mumps virus with optimal control, {\it Chaos, Solitons \& Fractals}, 144 (2021),  110668.

\bibitem{tuan} H. Jafari, N. H. Tuan, and  R. M. Ganji,  A new numerical scheme for solving pantograph type nonlinear fractional integro-differential equations, {\it Journal of King Saud University-Science}, 33(1) (2021), 101185.


\bibitem{rezapour2} S. Rezapour, F. M. Sakar, S. M. Aydogan, and E. Ravash,  Some results on a system of multiterm fractional integro-differential equations, {\it Turkish Journal of Mathematics}, 44(6) (2020), 2004--2020.

\bibitem{jafari5} H. Jafari, A new general integral transform for solving integral equations, {\it Journal of Advanced Research}, (2020). https://doi.org/10.1016/j.jare.2020.08.016



\bibitem{Ma} X. Ma and Ch. Huang, Numerical solution of fractional integro differential equations by hybrid collocation method, {\it Applied Mathematics and Computation}, 219 (2013), 6750--6760.

% \bibitem{Mashayekhi} S. Mashayekhi, M. Razzaghi, Numerical solution of nonlinear fractional integro differential equations by hybrid functions, Engineering Analysis with Boundary Elements, 56, 81--89 (2015)

\bibitem{Jabari} D. Jabari Sabegh, R. Ezzati, and K. Maleknejad, Approximate solution of fractional integro-differential equations by least squares method, {\it International Journal of Analysis and Applications}, 17 (2019), 303--310.


\bibitem{jafari1} M. Badr,  A. Yazdani, and H. Jafari, Stability of a finite volume element method for the time‐fractional advection‐diffusion equation,
 {\it   Numerical Methods for Partial Differential Equations}, 34(5) (2018), 1459--1471.
 
 \bibitem{jafari2}
S. Sadeghi, H. Jafari, and S. Nemati,   Operational matrix for Atangana–Baleanu derivative based on Genocchi polynomials for solving FDEs,
    {\it Chaos, Solitons \& Fractals}, 135 (2020),  109736.
     
  \bibitem{jafari3}   N. H. Tuan, S. Nemati, R. M. Ganji, and  H. Jafari, Numerical solution of multi-variable order fractional integro-differential equations using the Bernstein polynomials, {\it Engineering with Computers}, (2020). https://doi.org/10.1007/s00366-020-01142-4
    
%
%
%\bibitem{mogh2} P. Mokhtary, B.P. Moghaddam, A.M. Lopes, J.T. Machado,  A computational approach for the non-smooth solution of non-linear weakly singular Volterra integral equation with proportional delay, {\it Numerical Algorithms}, 83(3) (2020) 987--1006.




 \bibitem{Kloden} P. E. Kloeden and E. Platen, {\it Numerical Solution of Stochastic Differential Equations}, Springer-Verlag, Berlin Heidelberg (1995).


\bibitem{Oksendal} B. Oksendal, {\it Stochastic Differential Equations: An Introduction with Applications}, 5th edition, Springer-Verlag, Berlin, Germany (1998).


\bibitem{bia} F. Biagini, Y. Hu, B. Oksendal, and T. Zhang, {\it Stochastic calculus for fractional Brownian motion and applications} Springer Science \& Business Media, London, 2008.

 \bibitem{fallah} H. Fallahgoul, S. Focardi, and F. Fabozzi, {\it Fractional calculus and fractional processes with applications to financial economics: Theory and application}, Academic Press, Elsevier, 2016.



\bibitem{meer} M. M. Meerschaert and A. Sikorskii, {\it Stochastic models for fractional calculus}, Walter de Gruyter GmbH \& Co KG, Berlin, 2019.


%
%\bibitem{he} X. He, W. Chen, An approximation formula for the price of credit default swaps under the fast-mean reversion volatility model, {\it  Appl Math} 64  (2019) 367--382.


 
%\bibitem{M1} B. P. Moghadam, L. Zhang, A. M. Lopes, J. A. Tenreiro Machado, Z.S. Mostaghim, Sufficient conditions for existence and uniqueness of fractional stochastic delay differential equations, An International Journal of Probability and Stochastic Processes, DOI: 10.1080/17442508.2019.1625903.



\bibitem{badr} A.A. Badr, and H.S. El-Hoety,  Monte-Carlo Galerkin Approximation of Fractional Stochastic Integro-Differential Equation, {\it  Mathematical Problems in Engineering}, 2012 (2012).


\bibitem{Kamrani} M. Kamrani, Numerical solution of stochastic fractional differential equations, {\it Numerical Algorithms}, 68 (2014), 81--93.


\bibitem{Khodabin} M. Khodabin, K. Maleknejad, and T. Damercheli, Approximate solution of stochastic Volterra integral equations via expansion method, {\it  Int. J. Industrial Mathematics}, 6  (2014), 41--48.

\bibitem{Asghari} M. Asgari,  Block pulse approximation of fractional stochastic integro-differential equation, {\it Commun. Numer. Anal}, 2014 (2014) 1--7.




 \bibitem{Mohammadi} F. Mohammadi, A Chebyshev wavelet operational method for solving stochastic Volterra-Fredholm integral equations, {\it  International Journal of Applied Mathematics Research}, 4 (2)  (2015), 217--227.


\bibitem{Taheri} Z. Taheri, S. Javadi, and E. Babolian, Numerical solution of stochastic fractional integro-differential equation by the spectral collocation method, {\it Journal of Computational and Applied Mathematics}, 321 (2017), 336--347.

 \bibitem{Mirzaee1} F. Mirzaee, S. Alipour, and N. Samadyar, Numerical solution based on hybrid of block-pulse and parabolic functions for solving a system of nonlinear stochastic Ito-Volterra integral equations of fractional order, {\it Journal of Computational and Applied Mathematics}, 349  (2019), 157--171.
 
%\bibitem{cardone} A. Cardone, R. D'Ambrosio, B. Paternoster, A spectral method for stochastic fractional differential equations. Applied Numerical Mathematics 139 (2019) 115--119.

\bibitem{Mogh} B. P. Moghadam, A. Mendes Lopes, J. A. Tenreiro Machado, and Z. S. Mostaghim, Computational scheme for solving nonlinear fractional stochastic differential equations with delay, {\it Stochastic Analysis and Applications}, 37 (2019), 893--908.

 
\bibitem{tripathi} M. P. Tripathi, V. K. Baranwal, R. K. Pandey, and O. P. Singh, A new numerical algorithm to solve fractional differential equations based on operational matrix of generalized hat functions, {\it Commun. Nonlinear Sci. Numer. Simul.}, 18 (2013) 1327--1340

 \bibitem{Nemati} S. Nemati and P.M. Lima, Numerical solution of nonlinear fractional integro-differential equations with weakly singular kernels via a modification of hat functions, {\it Applied Mathematics and Computation},  327 (2018), 79--92.


  \bibitem{Momenzade} N. Momenzade, A.R. Vahidi, and E. Babolian, A computational method for solving stochastic Ito Volterra integral equation with multi stochastic terms, {\it Mathematical Sciences}, 12 (2018), 295--303.
  
 \bibitem{Mirzaee2} F. Mirzaee and E. Hadadiyan, Numerical solution of Volterra-Fredholm integral equations via modification of hat functions, {\it Applied Mathematics and Computations}, 280 (2016), 110--123.
 
 



%
%
%\bibitem{RefJ1}
%% Format for Journal Reference
%F. Author and S. Author, Instructions for authors, {\it Journal Name}, Volume (year), pp-pp. 
%
%\bibitem{RefJ2}
%% Format for Journal Reference
%F. Author, S. Author and T. Author, Article title should be written here, {\it Journal Name}, Volume (year), pp-pp.
%
%% Format for books
%\bibitem{RefB}
%F. Author, {\it Book Title Should Be Written Here}, pp-pp, Publisher, place (year)
%% etc

\end{thebibliography}
\end{center}



{\small

\noindent{\bf Elnaz Aryani}

\noindent Department of Applied Mathematics

\noindent Ph.D. Student of Applied Mathematics

\noindent University of Mazandaran

\noindent Babolsar, Iran

\noindent E-mail: aryani@stu.umz.ac.ir}\\

{\small

\noindent{\bf Afshin Babaei}

\noindent Department of Applied Mathematics

\noindent Associate Professor of Applied Mathematics

\noindent University of Mazandaran

\noindent Babolsar, Iran

\noindent E-mail: babaei@umz.ac.ir}\\

{\small
\noindent{\bf  Ali Valinejad  }

\noindent  Department of Computer Science

\noindent Assistant Professor of Mathematics

\noindent University of Mazandaran


\noindent Babolsar, Iran

\noindent E-mail: valinejad@umz.ac.ir}\\



\end{document}