\documentclass[11pt,twoside]{article}
\usepackage{amsmath, amsthm, amscd, amsfonts, amssymb, graphicx, color}
\usepackage[bookmarksnumbered, colorlinks]{hyperref} \usepackage{float}
\usepackage{lipsum}
\usepackage{afterpage}
\usepackage[labelfont=bf]{caption}
\usepackage[nottoc,notlof,notlot]{tocbibind} 
%\renewcommand\bibname{References}
\def\bibname{\Large \bf  References}
\usepackage{lipsum}
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\renewcommand{\headrulewidth}{0pt}
\fancyhead[LE,RO]{\thepage}
\thispagestyle{empty}
%\afterpage{\lhead{new value}}

\fancyhead[CE]{Z. JAFARIANI, N. KANZI AND M. NADERI PARIZ} 
\fancyhead[CO]{The Fr\'{e}chet Normal Cone of Optimization Problems with Switching Constraints}%\thanks{Grants or other notes      }



%\topmargin=-1.6cm
\textheight 17.5cm%
\textwidth  12cm %
\topmargin   8mm  %
\oddsidemargin   20mm   %
\evensidemargin   20mm   %
\footskip=24pt     %

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{xca}[theorem]{Exercise}
%\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\renewenvironment{proof}{{\bfseries \noindent Proof.}}{~~~~$\square$}
\makeatletter
\def\th@newremark{\th@remark\thm@headfont{\bfseries}}
\makeatother



  
  
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% If you want to insert other packages. Insert them here
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\long\def\symbolfootnote[#1]#2{\begingroup%
%\def\thefootnote{\fnsymbol{footnote}}\footnote[#1]{#2}\endgroup}



 \def \thesection{\arabic{section}}
 %%%
 \DeclareMathOperator{\RE}{Re}
\DeclareMathOperator{\IM}{Im}
\DeclareMathOperator{\ess}{ess}
\newcommand{\eps}{\varepsilon}
\newcommand{\To}{\longrightarrow}
\newcommand{\h}{\mathcal{H}}
\newcommand{\s}{\mathcal{S}}
\newcommand{\A}{\mathcal{A}}
\newcommand{\B}{\mathcal{B}}
\newcommand{\J}{\mathcal{J}}
\newcommand{\T}{\mathcal{T}}
\newcommand{\M}{\mathcal{M}}
\newcommand{\W}{\mathcal{W}}
\newcommand{\X}{\mathcal{X}}
\newcommand{\BOP}{\mathbf{B}}
\newcommand{\BH}{\mathbf{B}(\mathcal{H})}
\newcommand{\KH}{\mathcal{K}(\mathcal{H})}
\newcommand{\Real}{\mathbb{R}}
\newcommand{\Complex}{\mathbb{C}}
\newcommand{\Field}{\mathbb{F}}
\newcommand{\RPlus}{\Real^{+}}
\newcommand{\Polar}{\mathcal{P}_{\s}}
\newcommand{\Poly}{\mathcal{P}(E)}
\newcommand{\EssD}{\mathcal{D}}
\newcommand{\Lom}{\mathcal{L}}
\newcommand{\States}{\mathcal{T}}
\newcommand{\abs}[1]{\left\vert#1\right\vert}
\newcommand{\set}[1]{\left\{#1\right\}}
\newcommand{\seq}[1]{\left<#1\right>}
\newcommand{\norm}[1]{\left\Vert#1\right\Vert}
\newcommand{\essnorm}[1]{\norm{#1}_{\ess}}
\newcommand{\Aa}{\mathfrak{A}}
\newcommand{\p}[1]{\left(#1\right)}

% MATH -------------------------------------------------------------------
%%% ----------------------------------------------------------------------

\def\wh{\widetilde}
\def\u{\cup }
\def\U{\bigcup}
\def\es{\emptyset}
\def\r{{\Bbb R}}
\def\R{\Rightarrow}
\def\l{\lambda}
\def\L{\Lambda}
\def\a{\alpha}
\def\b{\beta}
\def\g{\gamma}
\def\c{\mathcal{S}}
\def\f{\varphi}
\def\x{\xi}
\def\p{\psi}
\def\v{\varsigma}
\def\s{\sum}
\def\ri{\rightarrow}
\def\h{\hat}
\def\z{\partial}
\newcommand{\se}[1]{\left\{#1\right\}}
\newcommand{\za}[1]{\left<#1\right>}
\def\1{\ldots}
\def\text{\mbox}

\begin{document}
%\baselineskip 9mm
%\setcounter{page}{}
\thispagestyle{plain}
{\noindent Journal of Mathematical Extension \\
Journal Pre-proof}\\
ISSN: 1735-8299\\
URL: http://www.ijmex.com\\
Original Research Paper\\
\vspace*{9mm}

\begin{center}

{\Large \bf 
The Fr\'{e}chet Normal Cone of Optimization Problems with Switching Constraints}%\thanks{Grants or other notes\\}
%{\bf Do You Have a Subtitle? \\ If so, Write It Here} 


\let\thefootnote\relax\footnote{\scriptsize Received:--- --  ; Accepted: -----    }

{\bf Z. Jafariani}\vspace*{-2mm}\\
\vspace{2mm} {\small  Payame Noor University } \vspace{2mm}

{\bf N. Kanzi}\vspace*{-2mm}\\
\vspace{2mm} {\small  Payame Noor University  } \vspace{2mm}

{\bf  M. Naderi Parizi $^*$\let\thefootnote\relax\footnote{$^*$Corresponding Author}}\vspace*{-2mm}\\
\vspace{2mm} {\small  Payame Noor University } \vspace{2mm}

\end{center}

\vspace{4mm}


{\footnotesize
\begin{quotation}
{\noindent \bf Abstract.} The paper deals with mathematical programming problems with switching constraints that are defined by continuously differentiable functions. The main results are upper approximations of the Fr\'{e}chet normal cone of the feasible set of the problem. As applications of the main results, we present some stationarity conditions for the considered problem.
\end{quotation}
\begin{quotation}
\noindent{\bf AMS Subject Classification:} 49J52; 90C30; 90C33; 90C46 

\noindent{\bf Keywords and Phrases:} Normal cone, Stationary condition, Constraint qualification, Switching constraints 
\end{quotation}}

\section{Introduction}
%\label{intro} % It is advised to give each section and subsection a unique label.
\emph{Mathematical programming with switching constraints} (MPSC, in brief), as a generalization of \emph{mathematical programming with vanishing constraints} (briefly, MPVC) and \emph{mathematical programming with equilibrium constraints} (MPEC, in short), together with some of its applications, was introduced in 2019 (\cite{Patric}). Theoretical aspects and a wide range of applications of MPSCs have been studied intensively by many
researchers; see \cite{Gorgini,Gorgini2,KM,LiGuo,LY,LvPeWa,Patric,PN,Shikhman}.

The general form of a MPSC is as
\begin{align*}
	(\Delta):\ \qquad \qquad \min \ &  f(x)\\
	s.t.\qquad &g_j(x)\leq 0,\qquad \qquad j\in J\\
                 &G_i(x)H_i(x)= 0, \quad \  i\in I,
\end{align*}
where the continuously differentiable functions   $ f , g_j, H_i , G_i:\mathbb{R}^n \to  \mathbb{R} $   (for  $j\in J$ and $ i \in I$)  are given and index sets $I$ and $J$ are finite. Throughout this article, we will suppose that the feasible set of $(\Delta)$, denoted by $\c $, is nonempty, i.e.,
\[
	\c =\{x\in \mathbb{R}^n\mid g_j(x)\leq 0,\ j\in J,\ \  G_i(x)H_i(x)=0, \  i\in I \}\neq \es. \]
As we know from \cite[Theorem 6.12]{ROCKA}, if  $x_0 \in \c$ is an optimal solution of $(\Delta)$, we have
\begin{equation}\label{9530}
-\nabla f(x_0) \in N_F(\c,x_0),
\end{equation}
where $ N_F(\c,x_0) $  denotes the Fr\'{e}chet normal cone of $ \c $ at  $x_0 $ (definitions will be described in the next section).
So, the upper estimation of $N_F(\c,x_0) $ with respect to $\nabla g_j(x_0)$, $\nabla G_i(x_0)$, and $\nabla H_i(x_0)$   is required to express the first-order necessary optimality condition for  $(\Delta)$. Since appropriate approximations for  MPVCs and MPECs were made in \cite{AMN,HoKa2,HoKa3,KazKan,KazKanEb,M,PM}, and so far this has not been done for MPSCs, we will address this issue for the first time in this paper.

We organize the paper as follows. In the next section, we provide the preliminary results to be used in the rest of the paper. In Section 3 we will seek to find some suitable upper approximations for the Fr\'{e}chet normal cone of the feasible set of MPSCs under three kinds of constraint qualifications of Guignard type, and we will use these approximations to reach three kinds of necessary optimality conditions for $(\Delta)$, named the strongly stationary condition, the weakly stationary condition, and the $M$-stationary condition.

\section{Notations and preliminaries
}
%%%\label{sec:2}
In this section, we overview some notations and preliminary results from \cite{ROCKA}  that will be used throughout this paper.

The set of all non-negative (resp. non-positive) real numbers is shown by $\r_+$ (resp. $\r_-$), and the zero vector in $\Bbb R^n$ is denoted by $0_n$.
For a  non-empty subset $ \Omega   $ of $ \Bbb R^n $, its polar cone is defined as
\[ \Omega^-:=\{x\in \Bbb R^n \mid \langle x,y \rangle \le 0,\ \ \ \forall y\in  \Omega    \},\]
where  $\langle \cdot,\cdot\rangle$ denotes the standard inner-product in $\Bbb R^n$.
Also,  $cone( \Omega)$,  $\mbox{cl}( \Omega)$, and $\overline{cone}( \Omega)$ denote  the convex cone, the closure, and the closed convex cone of $\Omega\subseteq \Bbb R^n$, respectively.  With convention  $ \es^-=\r^n $, it is easy to see (\cite[Section 14]{ROCKA}) that $ \Omega^-$ is a closed convex cone for each $ \Omega\subseteq \r^n$, and
\begin{equation}\label{001}
	\big(\overline{cone}( \Omega)\big)^-= \Omega^-.
\end{equation}
We recall from \cite[Theorem 3.3]{ROCKA} that
\begin{equation}\label{cone}
	cone(\Omega)=\left\{\sum_{\kappa=1}^s\alpha_\kappa \omega_\kappa\mid s\in \Bbb{N},\ \alpha_\kappa\geq 0, \ \omega_\kappa\in \Omega\right\}.
\end{equation}
The following theorems are recalled from \cite{ROCKA}.
\begin{theorem}\label{coco}
	If $ \Omega$ is finite, then $cone( \Omega)$ is closed.
\end{theorem}
\begin{theorem}\label{tq4}
Suppose that $\Omega_{1}$ and $\Omega_2$ are nonempty closed convex cones in $\r^n$.   Then
\[
\left(\Omega_{1}\cup \Omega_2\right)^-=\Omega_{1}^-\cap \Omega_{2}^-,\quad \mbox{and}\quad \left(\Omega_{1}\cap \Omega_2\right)^-=\emph{cl}\big(\Omega_1^-+\Omega_2^-\big).\]
\end{theorem}
\begin{theorem}\label{tq3}
If the linear function $\f:\Bbb R^n \to \Bbb R$  is defined as $\f(x)=\langle a_0,x \rangle$ for a given $a_0\in \Bbb R^n $, and  $ \Omega \subseteq \Bbb R$ is a given convex  set, then
\[
\left(\f^{-1}\big(cl( \Omega)\big)\right)^-= \Omega^-a_0,\]
in which $\f^{-1}\big(cl( \Omega)\big):=\{x\in \Bbb R^n \mid \f(x)\in cl( \Omega) \}$.
\end{theorem}
The Bouligand tangent cone  and the Fr\'{e}chet  normal cone  of $  \Omega \neq \emptyset $ at $x_0 \in  \Omega$ are respectively defined as
\[
\Gamma( \Omega,x_0):=\Big\{v\in \Bbb R^n \mid \exists t_s \downarrow 0,\ \exists v_s \rightarrow v\ \text{such that}\  x_0+t_s v_s \in  \Omega \ \ \forall s\in \mathbb{N}  \Big\} ,\]
\[
 N_F( \Omega,x_0):=\Big(\Gamma( \Omega,x_0)\Big)^-.\]


\section{Main Results }
As the starting point of this section, we state the following technical theorem that plays a key role in this paper.
\begin{theorem}\label{T31}
If $K:=\big\{ y\in \r^q \mid \mathbf{A}y\leq 0_p,\ \mathbf{B}y=0_r\big\}$ for some matrices $\mathbf{A}\in \r^{p\times q}$ and  $\mathbf{B}\in \r^{r\times q}$, then
\[
K^-=\Big\{\mathbf{A}^\top \mu +\mathbf{B}^\top \eta \mid \eta\in \r^r,\ \mu\in \r^p_+ \Big\}.\]
\end{theorem}
\begin{proof}
Let
\[
\mathbf{A}:=\left[
  \begin{array}{ccc}
    a_{11}& ... & a_{1q} \\
    \vdots & \ & \vdots \\
     a_{p1}& ... & a_{pq}
  \end{array}
\right]_{p\times q}\quad \mbox{and}\qquad \mathbf{B}:=\left[
  \begin{array}{ccc}
    b_{11}& ... & b_{1q} \\
    \vdots & \ & \vdots \\
     b_{r1}& ... & b_{rq}
  \end{array}
\right]_{r\times q}.\]
Also,  for each $i=1,...,p$ and $j=1,...,r$ put
\[
A_i:=\left[\begin{array}{c}
                             a_{i1} \\
                             \vdots \\
                             a_{iq}\\
                             \end{array}
                                         \right] \qquad \mbox{and} \qquad  B_j:=\left[\begin{array}{c}
                             b_{j1} \\
                             \vdots \\
                             b_{jq}\\
                             \end{array}
                                         \right].\]
So, we can write $K$ as follows:
\[
K=\bigg\{y\in \r^q \left|
  \begin{array}{ccc}
   \langle A_i,y\rangle \leq 0,\quad \forall i=1,...,p\\
    \ \\
        \langle B_j,y\rangle = 0,\quad \forall j=1,...,r
  \end{array}
\right.   \bigg\}.
\]
We define the linear functions $\f_i, \p_j :\r^q\to \r$, for $i\in \{1,...,p\}$ and $j\in \{1,...,r\}$, as
\[
\f_i(y):=  \langle A_i,y\rangle ,\qquad \mbox{and} \qquad \p_j:=  \langle B_j,y\rangle,\]
and rewrite $K$ as follows:
\begin{eqnarray*}
% \nonumber to remove numbering (before each equation)
  K &=& \Big\{y\in \r^q \mid \f_i(y)\in \r_-,\ \ \forall i=1,...,p\quad \mbox{and} \quad \p_j(y) = 0,\ \ \forall j=1,...,r\Big\}\\
  &=& \Big(\bigcap_{i=1}^p \f_i^{-1}(\r_-)  \Big)\cap  \Big(\bigcap_{j=1}^r \p_j^{-1}(\{0\})  \Big).
  \end{eqnarray*}
This equality and Theorem \ref{tq4} conclude that
\begin{equation}\label{E31}
  K^-=\mbox{cl}\Big(\sum_{i=1}^p\big(\f_i^{-1}(\r_-)\big)^- ~+~\sum_{j=1}^r \big(\p_j^{-1}(\{0\})\big)^-\Big).
\end{equation}
Now, owing to Theorem \ref{tq3}, we get
\begin{eqnarray}\label{E32}
% \nonumber to remove numbering (before each equation)
  \sum_{i=1}^p\big(\f_i^{-1}(\r_-)\big)^- &=& \sum_{i=1}^p(\r_-)^- A_i=\sum_{i=1}^p\r_+A_i \qquad \qquad\nonumber\\
   &=& \Big\{\sum_{i=1}^p\mu_iA_i \mid \mu_i\geq 0,\ i=1,...,p  \Big\} \nonumber\\
   &=& \Big\{\sum_{i=1}^p\mu_i\left[\begin{array}{c}
                             a_{i1} \\
                             \vdots \\
                             a_{iq}\\
                             \end{array}
                                         \right] \mid \mu_i\geq 0,\ i=1,...,p  \Big\}\nonumber \\
  &=&\bigg\{\left[\begin{array}{c}
                            \mu_1 a_{11}+ \mu_2 a_{21}+...+  \mu_p a_{p1} \\
                             \vdots \\
                            \mu_1 a_{1q}+ \mu_2 a_{2q}+...+  \mu_p a_{pq}\\
                             \end{array}
                                         \right] \mid \mu_i\geq 0,\ i=1,...,p  \bigg\}\nonumber \\
   &=&\bigg\{ \left[
  \begin{array}{ccc}
    a_{11}& ... & a_{p1} \\
    \vdots & \ & \vdots \\
     a_{1q}& ... & a_{pq}
  \end{array}
\right]\left[\begin{array}{c}
                             \mu_{1} \\
                             \vdots \\
                             \mu_{p}\\
                             \end{array}
                                         \right] \mid \mu_i\geq 0,\ i=1,...,p \bigg\}\nonumber\\
&=&\Big\{\mathbf{A}^\top \mu \mid \mu\in \r^p_+\Big\}.
\end{eqnarray}
Similarly, we have
\begin{eqnarray*}
% \nonumber to remove numbering (before each equation)
  \sum_{j=1}^r\big(\p_j^{-1}(\{0\})\big)^- &=& \sum_{j=1}^r(\{0\})^- B_j=\sum_{j=1}^r\r B_j \qquad \qquad\nonumber\\
     &=&\bigg\{\left[\begin{array}{c}
                            \eta_1 b_{11}+ \eta_2 b_{21}+...+  \eta_r b_{r1} \\
                             \vdots \\
                            \eta_1 b_{1q}+ \eta_2 b_{2q}+...+  \eta_r b_{rq}\\
                             \end{array}
                                         \right] \mid \eta_j\in \r,\ j=1,...,r  \bigg\}\nonumber \\
   &=&\bigg\{ \left[
  \begin{array}{ccc}
    b_{11}& ... & b_{r1} \\
    \vdots & \ & \vdots \\
     b_{1q}& ... & b_{rq}
  \end{array}
\right]\left[\begin{array}{c}
                             \eta_{1} \\
                             \vdots \\
                             \eta_{r}\\
                             \end{array}
                                         \right] \mid \eta_j\in \r,\ j=1,...,r \bigg\}\nonumber\\
&=&\Big\{\mathbf{B}^\top \eta \mid \eta\in \r^r \Big\}.
\end{eqnarray*}
The above equality, (\ref{E31}), and (\ref{E32}) imply that
\begin{equation}\label{E33}
K^-=\mbox{cl}\bigg(\Big\{\mathbf{A}^\top \mu +\mathbf{B}^\top \eta \mid \eta\in \r^r,\ \mu\in \r^p_+\Big\}\bigg).
\end{equation}
On the other hand, considering (\ref{cone}), we deduce that
\begin{align*}
   & cone\Big(\big\{A_i, B_j, -B_j \mid i=1,...,p, \ j=1,...,r  \big\}  \Big)= \\
   &\Big\{\sum_{i=1}^p\mu_i A_i+ \sum_{j=1}^r\h \eta_j B_j+\sum_{j=1}^r\bar \eta_j (- B_j)\mid \mu_i,\h \eta_j, \bar \eta_j \geq 0, \   i=1,...,p, \ j=1,...,r \Big\}=\\
   &\Big\{\mathbf{A}^\top \mu +\mathbf{B}^\top \eta \mid \eta\in \r^r,\ \mu\in \r^p_+\Big\},
\end{align*}
in which $\eta_j:=\h \eta_j-\bar \eta_j$ for all $j=1,...,r$. The above equality, Theorem \ref{coco}, and finiteness of set $\big\{A_i, B_j, -B_j \mid i=1,...,p, \ j=1,...,r  \big\}$ conclude that the following set is closed:
\[
\Big\{\mathbf{A}^\top \mu +\mathbf{B}^\top \eta \mid \eta\in \r^r,\ \mu\in \r^p_+\Big\}.\]
Consequently, (\ref{E33}) implies
\[
K^-=\Big\{\mathbf{A}^\top \mu +\mathbf{B}^\top \eta \mid \eta\in \r^r,\ \mu\in \r^p_+\Big\},\]
as required.
\end{proof}
Considering a feasible point $ \hat{x}\in \c $ (this point will be fixed throughout this paper), we define the following index sets:
\begin{align*}
     &J_0:=\{j\in J \mid \ g_j(\h x)=0\},\\
	&I_{G}:=\{i\in I\mid\ G_i(\hat{x})=0,\  H_i(\hat{x})\neq 0\}, \\
	&I_{H}:=\{i\in I\mid \ G_i(\hat{x})\neq 0,\  H_i(\hat{x})=0\}, \\
	&I_{GH}:=\{i\in I\mid G_i(\hat{x})=0 ,\ H_i(\hat{x})=0\}.
\end{align*}
Suppose that  the constraints of $(\Delta)$  have the following order:
\begin{align*}
	 &g_1,g_2,...,g_{_{|J_0|}},g_{_{|J_0|+1}},...,g_{_{|J|}},\\
                 &G_1H_1, G_2H_2,...,G_{_{|I_G|}}H_{_{|I_G|}},G_{_{|I_G|+1}}H_{_{|I_G|+1}},...,G_{_{|I_G|+|I_H|}}H_{_{|I_G|+|I_H|}},...,G_{_{|I|}}H_{_{|I|}}.
\end{align*}
Motivated by \cite{Gorgini,Patric}, we define the following Guignard type constraint qualifications for MPSCs.
\begin{definition} We say that $(\Delta)$ satisfies
\begin{itemize}
  \item the Guignard constraint qualification (GCQ) at $\h x \in \c$ if
  \[
   L:=\bigg\{w\in \r^n\left | \begin{array}{ll}
                  \langle w,\nabla g_j(\h x) \rangle\leq 0,\ & j\in J_0\\
                    \langle w,\nabla G_i(\h x) \rangle =0,\ & i\in I_{G}\\
                    \langle w,\nabla H_i(\h x) \rangle =0,\ & i\in I_{H}
                 \end{array}
                 \right.\bigg\}~\subseteq ~\overline{cone}\big( \Gamma(\c,\h x) \big).\]
  \item the weak-GCQ (WGCQ) at $\h x \in \c$ if
 \[
  L_1:= \bigg\{w\in \r^n\left | \begin{array}{ll}
                  \langle w,\nabla g_j(\h x) \rangle\leq 0,\ & j\in J_0\\
                    \langle w,\nabla G_i(\h x) \rangle =0,\ & i\in I_{G}\cup I_{GH}\\
                    \langle w,\nabla H_i(\h x) \rangle =0,\ & i\in I_{H}\cup I_{GH}
                 \end{array}
                 \right.\bigg\}~\subseteq ~\overline{cone}\big( \Gamma(\c,\h x) \big).\]
   \item the MPSC-GCQ at $\h x \in \c$ if
  \[
  L_{2}:=\bigg\{w\in \r^n\left | \begin{array}{ll}
                  \langle w,\nabla g_j(\h x) \rangle\leq 0,\ & j\in J_0\\
                    \langle w,\nabla G_i(\h x) \rangle =0,\ & i\in I_{G}\\
                    \langle w,\nabla H_i(\h x) \rangle =0,\ & i\in I_{H}\\
                    \langle w,\nabla G_i(\h x) \rangle \langle w,\nabla H_i(\h x)\rangle= 0,\ & i\in I_{GH}
                 \end{array}
                 \right.\bigg\}~\subseteq ~\overline{cone}\big( \Gamma(\c,\h x) \big).\]
  \end{itemize}
\end{definition}
It should be noted that the clear inclusions $L_1 \subseteq L_2 \subseteq L$ imply that the following implications are true at $\h x$:
\begin{equation}\label{E30}
\mbox{GCQ} ~ \Longrightarrow ~\mbox{MPSC-GCQ} ~\Longrightarrow ~\mbox{WGCQ}.
\end{equation}
The following theorem introduces a broad and important class of MPSCs that satisfy  MPSC-GCQ (and hence, WGCQ) at all of their feasible
points.
 \begin{theorem}\label{T55}
Consider the following optimization problem  with linear switching constraints:
\begin{align*}
(\Theta):\qquad \min\ &f(x)\\
s.t.\ \ \  & \langle u_j,x\rangle \leq0,\qquad \quad \  j\in J,\\
           & \langle p_i,x\rangle \langle q_i,x\rangle =0,\quad i\in I,\\
           &x\in \r^n,
\end{align*}
where, $u_j,\ p_i$, and $q_i$ are nonzero vectors in $\r^n$ for all $j\in J$ and $i\in I$. This problem satisfies MPSC-GCQ at all of its feasible points.
\end{theorem}
\begin{proof}
First, we recall that the considered problem can be written as $(\Delta)$ with the following data:
\[
g_j(x)=\langle u_j,x\rangle,\quad G_i(x)= \langle p_i,x\rangle,\quad  H_i(x)= \langle q_i,x\rangle.\]
Suppose that $\h x \in \c$ and
\begin{equation}\label{E50}
 w\in L_2=\bigg\{w\in \r^n\left | \begin{array}{ll}
                  \langle w,u_j \rangle\leq 0,\ & j\in J_0\\
                    \langle w,p_i \rangle =0,\ & i\in I_{G}\\
                    \langle w,q_i \rangle =0,\ & i\in I_{H}\\
                     \langle w,p_i \rangle \langle w,q_i\rangle= 0,\ & i\in I_{GH}
                 \end{array}
                 \right.\bigg\},
\end{equation}
are  arbitrarily given. Let $\c_1:=\{x\in \r^n \mid  \langle x,u_j \rangle\leq 0,\ j\in J\}$ and $\c_2:=\{  x\in \r^n \mid  \langle x,p_i\rangle\langle x,q_i\rangle= 0,\ i\in I\}.$ If $t\geq 0$ and $j\in J_0$ are given, we have
\begin{equation}\label{E51}
  \langle u_j,\h x +tw\rangle=\underbrace{\langle u_j,\h x \rangle}_{=0}+t\underbrace{\langle u_j,w\rangle}_{\leq 0}\leq 0.
\end{equation}
 If $j\in J\setminus J_0$, then $\langle u_j,\h x \rangle <0$, and so for all sufficiently small $t\geq0$ we have $\langle u_j,\h x \rangle+t\langle u_j,w\rangle\leq 0$, i.e., there exists a $\delta_j >0$ such that
 \[
   \langle u_j,\h x +tw\rangle \leq 0,\quad \quad \mbox{for}\ t\in [0,\delta_j).\]
   This inequality and (\ref{E51}) imply that for all $0\leq t<\delta:=\min \{\delta_j \mid j\in J\setminus J_0\}>0$ and all $j\in J$, we have  $\langle u_j,\h x +tw\rangle \leq0$, and so
   \begin{equation}\label{E52}
     \h x +tw \in \c_1, \quad \quad \mbox{for}\ t\in [0,\delta).
   \end{equation}
On the other hand, for all $t>0$  we get
\begin{align*}
   & \big( \langle p_i, \h x+tw \rangle \big)\big( \langle q_i, \h x+tw \rangle \big)= \\
   & \quad \underbrace{\langle p_i, \h x \rangle\langle q_i, \h x \rangle}_{(a)}+t\underbrace{\langle p_i, \h x \rangle\langle q_i, w \rangle}_{(b)}+t\underbrace{\langle p_i, w \rangle\langle q_i, \h x \rangle}_{(c)}+t^2\underbrace{\langle p_i, w \rangle\langle q_i, w \rangle}_{(d)}.
\end{align*}
For $i\in I_G$, we have $(a)=(b)=0$, and $(c)=(d)=0$ by $w\in L_2$. For $i\in I_H$, we have $(a)=(c)=0$, and $(b)=(d)=0$ by $w\in L_2$. For $i\in I_{GH}$, we have $(a)=(b)=(c)=0$, and $(d)=0$ by $w\in L_2$. Thus,
\[
\big( \langle p_i, \h x+tw \rangle \big)\big( \langle q_i, \h x+tw \rangle \big)=0,\quad \forall t\geq 0,\ i\in I,\]
and hence $ \h x +tw \in \c_2$. This inclusion with (\ref{E52}) and $\c=\c_1 \cap \c_2$ deduces that
\[
 \h x +tw \in \c, \quad \quad \mbox{for}\ t\in [0,\delta),\]
and hence $w\in \Gamma(\c,\h x)$. Since $w$ is an arbitrary element in $L_2$ and $ \Gamma(\c,\h x) \subseteq \overline{cone}\big( \Gamma(\c,\h x) \big)$, we get $L_2 \subseteq \overline{cone}\big( \Gamma(\c,\h x) \big)$, as required. 
\end{proof}
The following example shows that  the GCQ may not hold at the optimal solution of problem $(\Theta)$ in Theorem \ref{T55}.
\begin{example}
Consider the following optimization problem:
\begin{align*}
\min \ &-3x_1-4x_2\\
s.t.\ \ \  & x_1+x_2\leq 0,\\
           &x_1x_2=0.
\end{align*}
We can formalize this problem as $(\Theta)$ with the following data,
\[
f(x_1,x_2)=\langle \left[
                     \begin{array}{c}
                       -3 \\
                       -4 \\
                     \end{array}
                   \right]
,\left[
           \begin{array}{c}
             x_1 \\
             x_2 \\
           \end{array}
         \right]\rangle
 ,\quad u_1(x_1,x_2)=\langle \left[
                     \begin{array}{c}
                       1 \\
                       1 \\
                     \end{array}
                   \right]
,\left[
           \begin{array}{c}
             x_1 \\
             x_2 \\
           \end{array}
         \right]\rangle\]
\[
p_1(x_1,x_2)=\langle \left[
                     \begin{array}{c}
                       1 \\
                       0\\
                     \end{array}
                   \right]
,\left[
           \begin{array}{c}
             x_1 \\
             x_2 \\
           \end{array}
         \right] \rangle,\quad q_1(x_1,x_2)=\langle \left[
                     \begin{array}{c}
                       0\\
                       1 \\
                     \end{array}
                   \right]
,\left[
           \begin{array}{c}
             x_1 \\
             x_2 \\
           \end{array}
         \right]\rangle.\]
Since $\c=\big(\r_-\times \{0\} \big)\cup \big(\{0\}\times \r_- \big)$, the optimal value of the problem is attained at $\h x=0_2$. Clearly, $I=J=J_0=I_{GH}$, and
\[
L=\Big\{\left[
           \begin{array}{c}
             w_1 \\
             w_2 \\
           \end{array}
         \right]\in \r^2 \mid w_1+w_2 \leq 0\Big\}\nsubseteq\r_-\times \r_-=\overline{cone}\big( \Gamma(\mathcal{S},0_2)\big).\]
Thus, GCQ does not hold at $\h x$. Note that, since
\[
L_2=\Big\{\left[
           \begin{array}{c}
             1 \\
            1 \\
           \end{array}
         \right],\ \left[
           \begin{array}{c}
            1 \\
             0 \\
           \end{array}
         \right] ,\ \left[
           \begin{array}{c}
            0 \\
            1 \\
           \end{array}
         \right],\ \left[
           \begin{array}{c}
            -1 \\
            0 \\
           \end{array}
         \right],\ \left[
           \begin{array}{c}
             0 \\
             -1 \\
           \end{array}
         \right]\Big\}^-=\{0_2\},\]
the MPSC-GCQ (and hence, WGCQ) holds at $\h x$.
\end{example}
Now, we can present our main results.
\begin{theorem}\label{T32}
Suppose that GCQ holds at $\h x$. Then
\[
N_F(\c,\h x)\subseteq\bigg\{\sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i\nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big)\left | \begin{array}{ll}
                   \l_j \geq 0,\ & j\in J_0\\
                    \a_i =0,\ & i\in I_{H} \cup I_{GH}\\
                   \b_i=0,\ &  i\in I_{G} \cup I_{GH}
                 \end{array}
                 \right.\bigg\}.
\]
\end{theorem}
\begin{proof}
According to GCQ and (\ref{001}), we have
\begin{equation}\label{E34}
  N_F(\c,\h x)=\big(\Gamma(\c,\h x) \big)^-=\Big(\overline{cone}\big(\Gamma(\c,\h x)\big) \Big)^-\subseteq L^-.
\end{equation}
Let
\[
\mathbf{g}:=\left[
  \begin{array}{ccc}
    \frac{\z g_1}{\z x_1}(\h x)& ... & \frac{\z g_1}{\z x_n}(\h x) \\
    \vdots & \ & \vdots \\
     \frac{\z g_{_{|J_0|}}}{\z x_1}(\h x)& ... &   \frac{\z g_{_{|J_0|}}}{\z x_n}(\h x)
  \end{array}
\right]_{|J_0|\times n},\quad \mathbf{G}:=\left[
  \begin{array}{ccc}
    \frac{\z G_1}{\z x_1}(\h x)& ... & \frac{\z G_1}{\z x_n}(\h x) \\
    \vdots & \ & \vdots \\
     \frac{\z G_{_{|I_G|}}}{\z x_1}(\h x)& ... &   \frac{\z G_{_{|I_G|}}}{\z x_n}(\h x)
  \end{array}
\right]_{|I_G|\times n},\]
\[
\mathbf{H}:=\left[
  \begin{array}{ccc}
    \frac{\z H_{_{|I_G|+1}}}{\z x_1}(\h x)& ... & \frac{\z H_{_{|I_G|+1}}}{\z x_n}(\h x) \\
    \vdots & \ & \vdots \\
     \frac{\z H_{_{|I_G|+|I_H|}}}{\z x_1}(\h x)& ... &   \frac{\z H_{_{|I_G|+|I_H|}}}{\z x_n}(\h x)
  \end{array}
\right]_{|I_H|\times n}.
 \]
 Owing to
\[
L=\Big\{w\in \r^n \mid \mathbf{g}w\leq0_{_{|J_0|}},\ \mathbf{G}w=0_{_{|I_G|}}, \ \mathbf{H}w=0_{_{|I_H|}}\Big\},\]
and Theorem \ref{T31}, we deduce that
\begin{equation}\label{E35}
L^-=\Big\{\mathbf{g}^\top \l +\mathbf{G}^\top \a+\mathbf{H}^\top \b \mid \a\in \r^{|I_G|},\ \b\in \r^{|I_H|},\ \l\in \r^{|J_0|}_+\Big\}.
\end{equation}
Since
\begin{align*}
   & \mathbf{g}^\top \l +\mathbf{G}^\top \a+\mathbf{H}^\top \b= \\
  & \left[
      \begin{array}{c}
       \l_1 \frac{\z g_1}{\z x_1}(\h x)+...+\l_{_{|J_0|}} \frac{\z g_{_{|J_0|}}}{\z x_1}(\h x) \\
        \vdots \\
          \l_1 \frac{\z g_1}{\z x_n}(\h x)+...+\l_{_{|J_0|}} \frac{\z g_{_{|J_0|}}}{\z x_n}(\h x)\\
      \end{array}
    \right]+\left[
      \begin{array}{c}
       \a_1 \frac{\z G_1}{\z x_1}(\h x)+...+\a_{_{|I_G|}} \frac{\z G_{_{|I_G|}}}{\z x_1}(\h x) \\
        \vdots \\
          \a_1 \frac{\z G_1}{\z x_n}(\h x)+...+\a_{_{|I_G|}} \frac{\z G_{_{|I_G|}}}{\z x_n}(\h x)\\
      \end{array}
    \right]+\\
    &\qquad \qquad \qquad \left[
      \begin{array}{c}
       \b_1 \frac{\z H_{_{|I_G|+1}}}{\z x_1}(\h x)+...+\b_{_{|I_H|}} \frac{\z H_{_{|I_G|+|I_H|}}}{\z x_1}(\h x) \\
        \vdots \\
          \b_1 \frac{\z H_{_{|I_G|+1}}}{\z x_n}(\h x)+...+\b_{_{|I_H|}} \frac{\z H_{_{|I_G|+|I_H|}}}{\z x_n}(\h x)\\
      \end{array}
    \right]=\\
    & \ \\
    &\sum_{j=1}^{|J_0|}\l_j\left[
           \begin{array}{c}
             \frac{\z g_j}{\z x_1}(\h x) \\
             \vdots\\
             \frac{\z g_j}{\z x_n}(\h x) \\
           \end{array}
         \right]+\sum_{i=1}^{|I_G|}\a_i\left[
           \begin{array}{c}
             \frac{\z G_i}{\z x_1}(\h x) \\
             \vdots\\
             \frac{\z G_i}{\z x_n}(\h x) \\
           \end{array}
         \right]+\sum_{i=|I_G|+1}^{|I_G|+|I_H|}\b_i\left[
           \begin{array}{c}
             \frac{\z H_i}{\z x_1}(\h x) \\
             \vdots\\
             \frac{\z H_i}{\z x_n}(\h x) \\
           \end{array}
         \right]=\\
         & \sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I_G|}\a_i \nabla G_i(\h x)+\sum_{i=|I_G|+1}^{|I_G|+|I_H|}\b_i\nabla H_i(\h x),
\end{align*}
by taking $\a_i=0$ for $i\in I_H \cup I_{GH}$ and $\b_i=0$ for $i\in I_G \cup I_{GH}$, we have
\[
\mathbf{g}^\top \l +\mathbf{G}^\top \a+\mathbf{H}^\top \b=  \sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i \nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big).\]
This equality, (\ref{E34}), and (\ref{E35}) imply that
\[
N_F(\c,\h x)\subseteq\bigg\{\sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i\nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big)\left | \begin{array}{ll}
                   \l_j \geq 0,\ & j\in J_0\\
                    \a_i =0,\ & i\in I_{H} \cup I_{GH}\\
                   \b_i=0,\ &  i\in I_{G} \cup I_{GH}
                 \end{array}
                 \right.\bigg\}.
\]
\end{proof}
Since the proof of the following theorem is exactly the same as Theorem \ref{T32}, we do not repeat it.
\begin{theorem}\label{T33}
Suppose that WGCQ holds at $\h x$. Then
\[
N_F(\c,\h x)\subseteq\bigg\{\sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i\nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big)\left | \begin{array}{ll}
                   \l_j \geq 0,\ & j\in J_0\\
                    \a_i =0,\ & i\in I_{H}\\
                   \b_i=0,\ &  i\in I_{G}
                 \end{array}
                 \right.\bigg\}.
\]
\end{theorem}
Because $L_2$ does not have a representation like set $K$ in Theorem \ref{T31}, the  proof of the following theorem is not exactly the same as Theorems \ref{T32} and \ref{T33}.
\begin{theorem}\label{T34}
Suppose that MPSC-GCQ holds at $\h x$. Then
\[
N_F(\c,\h x)\subseteq\bigg\{\sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i\nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big)\left | \begin{array}{ll}
                   \l_j \geq 0,\ & j\in J_0\\
                    \a_i =0,\ & i\in I_{H} \\
                   \b_i=0,\ &  i\in I_{G}\\
                   \a_i\b_i=0,\ & i\in I_{GH}
                 \end{array}
                 \right.\bigg\}.
\]
\end{theorem}
\begin{proof}
To begin the proof, we observe that
\begin{align*}
   & \big\{w\in \r^n \mid  \langle w,\nabla G_i(\h x) \rangle \langle w,\nabla H_i(\h x)\rangle= 0,\  i\in I_{GH}\big\}= \\
   & \big\{w\in \r^n \mid  \exists \tilde I \subseteq I_{GH}, \langle w,\nabla G_i(\h x) \rangle =0\ \mbox{for}\ i\in \tilde I, \ \langle w,\nabla H_i(\h x)\rangle= 0\ \mbox{for}\ i\in I_{GH}\setminus \tilde I\big\}.
\end{align*}
Thus,
\[
   L_2=\bigg\{w\in \r^n\mid \exists \tilde I \subseteq I_{GH}\ \left | \begin{array}{ll}
                  \langle w,\nabla g_j(\h x) \rangle\leq 0,\ & j\in J_0\\
                    \langle w,\nabla G_i(\h x) \rangle =0,\ & i\in I_{G}\cup \tilde I\\
                    \langle w,\nabla H_i(\h x) \rangle =0,\ & i\in I_{H}\cup \big(I_{GH}\setminus \tilde I \big)
                 \end{array}
                 \right.\bigg\},\]
and similar to the proof of Theorem \ref{T32} we conclude that
\begin{align}\label{E38}
                   &  L_2^- = \bigg\{\sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i\nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big)\mid \nonumber\\
                   & \qquad \qquad \qquad \qquad  \qquad \qquad \exists \tilde I \subseteq I_{GH}\left | \begin{array}{ll}
                   \l_j \geq 0,\ & j\in J_0\\
                    \a_i =0,\ & i\in I_{H} \cup \big(I_{GH}\setminus \tilde I \big)\\
                   \b_i=0,\ &  i\in I_{G} \cup \tilde I
                 \end{array}
                 \right.\bigg\}\nonumber\\
                 &\subseteq \bigg\{\sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i\nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big)\left | \begin{array}{ll}
                   \l_j \geq 0,\ & j\in J_0\\
                    \a_i =0,\ & i\in I_{H} \\
                   \b_i=0,\ &  i\in I_{G}\\
                   \a_i\b_i=0,\ & i\in I_{GH}
                 \end{array}
                 \right.\bigg\}.
                \end{align}
Now, MPSC-GCQ, (\ref{001}), and (\ref{E38}) imply that
\begin{align*}
   & N_F(\c,\h x)=\big(\Gamma(\c,\h x) \big)^-=\Big(\overline{conv}\big(\Gamma(\c,\h x)\big) \Big)^-\subseteq L_2^-\subseteq  \\
   & \bigg\{\sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i\nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big)\left | \begin{array}{ll}
                   \l_j \geq 0,\ & j\in J_0\\
                    \a_i =0,\ & i\in I_{H} \\
                   \b_i=0,\ &  i\in I_{G}\\
                   \a_i\b_i=0,\ & i\in I_{GH}
                 \end{array}
                 \right.\bigg\},
\end{align*}
and the proof is complete.
\end{proof}

As applications of the above theorems, we state the KKT-type necessary optimality conditions for MPSCs as follows. Note that these optimality conditions are presented in \cite{LiGuo,LY,Patric} for the smooth case and in \cite{Gorgini,Gorgini2} for the nonsmooth case, using other methods.
\begin{theorem}\label{KKT}
	Let $ \hat{x} $ be an optimal solution of $(\Delta)$.
\begin{enumerate}
  \item If GCQ holds at $\hat x$, then  we can find some coefficients $\l_j$, $\a_i$, and $\b_i$ as $j\in J_0$ and $i\in I$, such that:
\begin{equation}\label{S1}
\left\{
  \begin{array}{ll}
   &\nabla f(\hat{x}) +\displaystyle \sum_{j=1}^{|J_0|}\l_{j} \nabla g_{j}(\hat{x}) +\displaystyle \sum_{i=1}^{|I|} \Big(\a_{i}\nabla G_{i}(\hat{x})+ \b_{i} \nabla H_{i}(\hat{x})\Big)=0_n, \\
   &\l_j\geq 0,\ \ \text{ for } j\in J_{0}, \\
   & \ \\
    & \a_i=0,\ \ \text{ for } i\in I_{H}\cup  I_{GH}, \\
& \ \\
  &\b_i=0,\ \ \text{ for } i\in I_{G}\cup  I_{GH}.
  \end{array}
\right.
  \end{equation}
  \item If WGCQ holds at $\hat x$, then  we can find some coefficients $\l_j$, $\a_i$, and $\b_i$ as $j\in J_0$ and $i\in I$, such that:
\begin{equation}\label{S2}
\left\{
  \begin{array}{ll}
   &\nabla f(\hat{x}) +\displaystyle \sum_{j=1}^{|J_0|}\l_{j} \nabla g_{j}(\hat{x}) +\displaystyle \sum_{i=1}^{|I|} \Big(\a_{i}\nabla G_{i}(\hat{x})+ \b_{i} \nabla H_{i}(\hat{x})\Big)=0_n, \\
   &\l_j\geq 0,\ \ \text{ for } j\in J_{0}, \\
   & \ \\
    & \a_i=0,\ \ \text{ for } i\in I_{H}, \\
& \ \\
  &\b_i=0,\ \ \text{ for } i\in I_{G}.
  \end{array}
\right.
  \end{equation}
  \item If MPSC-GCQ holds at $\hat x$, then  we can find some coefficients $\l_j$, $\a_i$, and $\b_i$ as $j\in J_0$ and $i\in I$, such that:
\begin{equation}\label{S3}
\left\{
  \begin{array}{ll}
   &\nabla f(\hat{x}) +\displaystyle \sum_{j=1}^{|J_0|}\l_{j} \nabla g_{j}(\hat{x}) +\displaystyle \sum_{i=1}^{|I|} \Big(\a_{i}\nabla G_{i}(\hat{x})+ \b_{i} \nabla H_{i}(\hat{x})\Big)=0_n, \\
   &\l_j\geq 0,\ \ \text{ for } j\in J_{0}, \\
   & \ \\
    & \a_i=0,\ \ \text{ for } i\in I_{H}, \\
& \ \\
  &\b_i=0,\ \ \text{ for } i\in I_{G},\\
  & \ \\
  &\a_i\b_i=0,\ \ \text{ for } i\in   I_{GH}.
  \end{array}
\right.
  \end{equation}
\end{enumerate}
\end{theorem}
\begin{proof}
It suffices to prove (\ref{S1}); the proofs of (\ref{S2}) and (\ref{S3}) are similar. According to (\ref{9530}) and Theorem \ref{T32}, we deduce that
\begin{align*}
  -\nabla f(\h x) &\in  N_F(\mathcal{S},\h x) \subseteq \\
  & \bigg\{\sum_{j=1}^{|J_0|}\l_j\nabla g_j(\h x)+\sum_{i=1}^{|I|}\Big(\a_i\nabla G_i(\h x)+\b_i\nabla H_i(\h x)\Big)\left | \begin{array}{ll}
                   \l_j \geq 0,\ & j\in J_0\\
                    \a_i =0,\ & i\in I_{H} \cup I_{GH}\\
                   \b_i=0,\ &  i\in I_{G} \cup I_{GH}
                 \end{array}
                 \right.\bigg\}.
\end{align*}
Hence, there exist some scalars $\l_{j}$, $\a_i$, and $\b_i$ with $j\in J_0$ and $i\in I$, satisfying (\ref{S1}). 
\end{proof}

It is worth mentioning that condition (\ref{S1}) (resp.\ (\ref{S2}) and (\ref{S3})) is referred to in \cite{Gorgini,Gorgini2,LiGuo,LY,Patric} as the ``strong stationarity condition'' (resp.\ the ``weak stationarity condition'' and the ``M-stationarity condition'') at $\h x$. The difference between these three stationarity conditions is that the multipliers $\a_i$ and $\b_i$ with $i\in I_{GH}$ are freer in M-stationarity than in strong stationarity, and freer in weak stationarity than in M-stationarity (``M'' stands for Mordukhovich). Several examples showing the comparison between these three kinds of stationarity conditions can be seen in \cite{Gorgini,Patric}.

\section{Conclusion}
In this paper, we derived three kinds of Guignard-type constraint qualifications as well as optimality conditions, named (weak, strong, M-) stationarity conditions, for mathematical programs with switching constraints involving continuously differentiable functions. The main results focused on upper estimates of the Fr\'{e}chet normal cone of the feasible set of the considered problem.

%%%%%%%%%%%%%%%%
\vspace{4mm}\noindent{\bf Acknowledgements}\\
\noindent The authors are very grateful to the referees for their constructive
comments. 


% BibTeX users please use one of
%\bibliographystyle{spbasic}      % basic style, author-year citations
%\bibliographystyle{spmpsci}      % mathematics and physical sciences
%\bibliographystyle{spphys}       % APS-like style for physics
%\bibliography{}   % name your BibTeX data base

% Non-BibTeX users please use
\begin{center}
\begin{thebibliography}{99} % Enter references in alphabetical order and according to the following format.


\bibitem{AMN} A. Ansari Ardali, N. Movahedian, S. Nobakhtian, Optimality conditions for nonsmooth mathematical
programs with equilibrium constraints using convexificators, Optimization, 65 (2014), 67-85.

%\bibitem{BOSH}  Bonnans JF, Shapiro A (2000) Perturbation Analysis of Optimization Problems. New York. Springer.

%\bibitem{CL}  Clarke FH (1983) Optimization and nonsmooth analysis. Wiley, Interscience.



%\bibitem{Gorgini}  Gorgini Shabankareh F, Kanzi N, Fallahi K, and Izadi J (2022) Stationarity in Nonsmooth Optimization with
%Switching Constraints. Iranian Journal of Science and Technology, Transactions A: Science. DOI: 10.1007/s40995-022-01289-3.

%\bibitem{GGT}  Giorgi, G., Gwirraggio, A., Thierselder, J.: Mathematics of Optimization; Smooth and Nonsmooth cases. Elsivier.(2004)

\bibitem{Gorgini} F. Gorgini Shabankareh, N. Kanzi, K. Fallahi, J. Izadi, Stationarity in nonsmooth optimization with
switching constraints. Iranian Journal of Science and Technology, Transactions A: Science, (2022), 1-9.


\bibitem{Gorgini2}  F. Gorgini Shabankareh, N. Kanzi, J. Izadi, K. Fallahi, Guignard qualifications and stationary conditions for mathematical programming with nonsmooth switching constraints, Control and Optimization in Applied Mathematics, 6 (2021), 23-35.

\bibitem{HoKa2}   T. Hoheisel, C. Kanzow, Stationarity conditions for mathematical programs with vanishing constraints	using weak constraint qualifications, Journal of Mathematical Analysis and Application, 337 (2008), 292-310.

\bibitem{HoKa3} T. Hoheisel, C. Kanzow, On the Abadie and Guignard constraint qualifications for mathematical programs with vanishing constraints, Optimization, 58 (2009), 431-448.

\bibitem{KM} C. Kanzow, P. Mehlitz and D. Steck, Relaxation schemes for mathematical programs with switching constraints, Optimization Methods and Software, (2019) 1-36.

\bibitem{KazKan}S. Kazemi, N. Kanzi, Constraint qualifications and stationary conditions for mathematical programming with non-differentiable vanishing constraints. Journal of Optimization Theory and Applications, 179 (2018),  800-819.

\bibitem{KazKanEb} S. Kazemi, N. Kanzi, A. Ebadian, Estimating the Fr\'{e}chet normal cone in optimization problems with nonsmooth vanishing constraints, Iranian Journal of Science and Technology, Transactions A: Science, 43 (2019), 2299-2306.


\bibitem{LiGuo} G. Li, L. Guo, Mordukhovich stationarity for mathematical programs with switching constraints under weak constraint qualifications,
Optimization, (2022), DOI: 10.1080/02331934.2022.2038151.

\bibitem{LY}YC. Liang, JJ. Ye, Optimality conditions and exact penalty for mathematical programs with switching constraints,
Journal of Optimization Theory and Applications, 191 (2021), 1-31.

%\bibitem{LG} Li G,  Guo L (2019) Mordukhovich stationarity for mathematical programs with switching constraints under weak constraint qualifications. Technical report, Optimization Online..

\bibitem{LvPeWa} J. Lv, Z. Peng, Z. Wan, Optimality conditions, qualifications and approximation
 method for a class of non-Lipschitz mathematical programs with switching constraints, Mathematics, 9 (2021), 2915. DOI: 10.3390/math9222915.



%\bibitem{MOR1} Mordukhovich BS (2006) Variational analysis and generalized differentiation I: Basic theory. Springer Science \& Business Media; 2006 Aug 8.

\bibitem{Patric}P. Mehlitz, Stationarity conditions and constraint qualifications for mathematical programs with switching constraints, Mathematical Programming, 181 (2020), 149-186.



 %\bibitem{MUT} Mishra SK,  Upadhyay BB, and Thi Hoai L An (2014) Lagrange Multiplier Characterizations of Solution Sets of
 %Constrained Nonsmooth Pseudolinear Optimization Problems. Journal of Optimization Theory and Applications 160:  763–777.

\bibitem{M} N. Movahedian, Bounded Lagrange multiplier rules for general nonsmooth problems and application
to mathematical programs with equilibrium constraints, Journal of  Global Optimization, 67 (2017), 829-850.

\bibitem{PM}Y. Pandey , SK. Mishra, Duality for nonsmooth optimization problems with equilibrium constraints,
using convexificators, Journal of Optimization Theory and Applications, 171 (2016), 694-707.

\bibitem{PN} Y. Pandey, V. Singh, On constraint qualifications for multiobjective optimization problems with switching constraints,
V. Laha et al. (Eds.), \emph{Optimization, Variational Analysis and Applications}, 283-306, Springer Proceedings in Mathematics \& Statistics, (2021) 

%\bibitem{Roc70}   Rockafellar RT (1970)  Convex Analysis.  Princeton University Press, Princeton.

\bibitem{ROCKA} RT. Rockafellar, B. Wets, Variational Analysis, Springer, Berlin (1998)

%\bibitem{SCH} Scholtes S (2004)  Nonconvex structures in nonlinear programming. Operation Research 52: 368-383.

\bibitem{Shikhman} V. Shikhman, Topological approach to mathematical programs with switching constraints, Set-Valued and Variational Analysis, 30 (2022), 335-354.





\end{thebibliography}

\end{center}



{\small

\noindent{\bf Zahra Jafariani }

%\noindent Department of Mathematics

\noindent Ph.D. Candidate of Mathematics

\noindent Department of Mathematics

\noindent Payame Noor University

\noindent P.O. Box 19395-3697

\noindent Tehran, Iran 

\noindent E-mail: z.jafariani@student.pnu.ac.ir}\\

{\small
\noindent{\bf Nader Kanzi   }

%\noindent  Department of Mathematics

\noindent Associate Professor of Mathematics

\noindent Department of Mathematics

\noindent Payame Noor University

\noindent P.O. Box 19395-3697

\noindent Tehran, Iran 

\noindent E-mail: n.kanzi@pnu.ac.ir }\\
%%%%%%%%%%%%%%%%%
{\small

\noindent{\bf Maryam Naderi Parizi }

%\noindent Department of Mathematics

\noindent Assistant Professor of Mathematics

\noindent Department of Mathematics

\noindent Payame Noor University
 
\noindent P.O. Box 19395-3697

\noindent Tehran, Iran 

\noindent E-mail: M.Naderi.Parizi@pnu.ac.ir }\\


\end{document}